import argparse
import json
import logging
import os
import sys
from unittest.mock import patch

from transformers.testing_utils import TestCasePlus, get_gpu_count, slow


SRC_DIRS = [
    os.path.join(os.path.dirname(__file__), dirname)
    for dirname in [
        "text-classification",
        "language-modeling",
        "summarization",
        "token-classification",
        "question-answering",
    ]
]
sys.path.extend(SRC_DIRS)

if SRC_DIRS is not None:
    import run_clm_flax
    import run_flax_glue
    import run_flax_ner
    import run_mlm_flax
    import run_qa
    import run_summarization_flax
    import run_t5_mlm_flax


logging.basicConfig(level=logging.DEBUG)

logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir, split="eval"):
    path = os.path.join(output_dir, f"{split}_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            return json.load(f)
    raise ValueError(f"can't find {path}")


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data, distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)

# ---------------------------------------------------------------------------

import tempfile
import unittest

from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
    require_sentencepiece,
    require_tokenizers,
    require_torch,
    slow,
    torch_device,
)

from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model


class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers

    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")

    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad tokens in between:
        # for NllbMoe the position_ids are prepared such that all pad tokens have
        # pos id = 2 and the rest are between 2..seq_length, where seq_length here is
        # seq_length - num_pad_tokens. But when using past, there is no way of knowing
        # whether the past input ids contained pad tokens, which results in an incorrect
        # seq_length and, in turn, position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state

        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)

    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
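        # Why the slice comparison works: with `past_key_values` the decoder consumes
        # only the newly appended token, so its single output position must match the
        # last position of the full-sequence forward pass that ran without the cache.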

    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())


@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)

    @unittest.skip("Test has a segmentation fault on torch 1.8.0")
    def test_export_to_onnx(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        model = UMT5Model(config_and_inputs[0]).to(torch_device)
        with tempfile.TemporaryDirectory() as tmpdirname:
            torch.onnx.export(
                model,
                (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]),
                f"{tmpdirname}/t5_test.onnx",
                export_params=True,
                opset_version=9,
                input_names=["input_ids", "decoder_input_ids"],
            )

    @unittest.skipIf(torch_device == "cpu", "Cant do half precision")
    def test_model_fp16_forward(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model_fp16_forward(*config_and_inputs)

    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)

    @unittest.skip("Does not work on the tiny model as we keep hitting edge cases.")
    def test_disk_offload(self):
        pass


@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)

        input_text = [
            "Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
            "No se como puedo <extra_id_0>.",
            "This is the reason why we <extra_id_0> them.",
            "The <extra_id_0> walks in <extra_id_1>, seats",
            "A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
        ]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
        # fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
        # fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
            "<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>",
            "<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
            "<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
        ]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)

# ---------------------------------------------------------------------------

# This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/

import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ControlNetModel,
    DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    IMAGE_TO_IMAGE_IMAGE_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
    TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
)


enable_full_determinism()


class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)


class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["guidance_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass


@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2

# ---------------------------------------------------------------------------

import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)

# ---------------------------------------------------------------------------

"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase : Tuple = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : str = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Any = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)

# ---------------------------------------------------------------------------

"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCAmelCase : Dict = logging.get_logger(__name__)
def __lowerCAmelCase ( __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : List[str] = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : int = downstream_dict["""projector.weight"""]
snake_case_ : Optional[int] = downstream_dict["""projector.bias"""]
snake_case_ : List[Any] = downstream_dict["""model.post_net.linear.weight"""]
snake_case_ : Union[str, Any] = downstream_dict["""model.post_net.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : str ):
'''simple docstring'''
snake_case_ : int = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""model.linear.weight"""]
snake_case_ : int = downstream_dict["""model.linear.bias"""]
return model
def __lowerCAmelCase ( __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ):
'''simple docstring'''
snake_case_ : Optional[int] = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase )
snake_case_ : Any = downstream_dict["""connector.weight"""]
snake_case_ : str = downstream_dict["""connector.bias"""]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
snake_case_ : Dict = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
snake_case_ : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
snake_case_ : str = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""]
snake_case_ : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""]
snake_case_ : int = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""]
snake_case_ : List[str] = downstream_dict["""objective.W"""]
return model
@torch.no_grad()
def __lowerCAmelCase ( __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple ):
'''simple docstring'''
snake_case_ : Any = torch.load(__UpperCamelCase , map_location="""cpu""" )
snake_case_ : Any = checkpoint["""Downstream"""]
snake_case_ : Optional[Any] = WavaVecaConfig.from_pretrained(__UpperCamelCase )
snake_case_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase )
snake_case_ : Optional[Any] = hf_config.architectures[0]
if arch.endswith("""ForSequenceClassification""" ):
snake_case_ : Tuple = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForAudioFrameClassification""" ):
snake_case_ : Union[str, Any] = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
elif arch.endswith("""ForXVector""" ):
snake_case_ : List[str] = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
snake_case_ : List[Any] = checkpoint["""Featurizer"""]["""weights"""]
hf_feature_extractor.save_pretrained(__UpperCamelCase )
hf_model.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
__lowerCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
__lowerCAmelCase : Dict = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)

# ---------------------------------------------------------------------------

"""simple docstring"""
from math import ceil, sqrt
def lowercase ( __snake_case : int = 1_0_0_0_0_0_0 ):
lowercase_ : Tuple = 0
for outer_width in range(3 , (limit // 4) + 2 ):
if outer_width**2 > limit:
lowercase_ : int = max(ceil(sqrt(outer_width**2 - limit ) ) , 1 )
else:
lowercase_ : int = 1
if (outer_width - hole_width_lower_bound) % 2:
hole_width_lower_bound += 1
answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1
return answer
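
# Worked example: with limit = 8 only outer_width = 3 contributes. 3**2 = 9 > 8, so
# hole_width_lower_bound = ceil(sqrt(9 - 8)) = 1, the parity already matches, and
# (3 - 1 - 2) // 2 + 1 = 1: the single 8-tile lamina (a 3x3 square missing its centre).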

if __name__ == "__main__":
    print(f"{solution() = }")

# ---------------------------------------------------------------------------

"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__A : List[Any] = logging.getLogger(__name__)
if __name__ == "__main__":
__A : int = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=30_522, type=int)
__A : Optional[int] = parser.parse_args()
logger.info(F"""Loading data from {args.data_file}""")
with open(args.data_file, '''rb''') as fp:
__A : List[str] = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
__A : Optional[Any] = Counter()
for tk_ids in data:
counter.update(tk_ids)
__A : Dict = [0] * args.vocab_size
for k, v in counter.items():
__A : Union[str, Any] = v
logger.info(F"""Dump to {args.token_counts_dump}""")
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)

# ---------------------------------------------------------------------------

import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
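
# Note on the helper above: after the weight-data assignment the layer's effective
# weight is the embedding matrix itself (shape (vocab_size, emb_size)), i.e. the
# standard tied input/output embedding projection from hidden states to logits.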

def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict
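
# Example mapping, worked by hand for expert_idx=3:
#   "decoder.layers.7.moe_layer.experts.0.fc1.weight"
#   -> "decoder.layers.7.ffn.experts.expert_3.fc1.weight"
# (the later "fc1" rule is skipped because the renamed key still contains "experts")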

def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name: str = WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)

# ---------------------------------------------------------------------------

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )

# ---------------------------------------------------------------------------

import math
from collections.abc import Iterator
from itertools import takewhile


def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using 6k +/- 1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers and all multiples of 3 are not primes
        return False

    # All primes greater than 3 are of the form 6k +/- 1, so it suffices to test
    # divisors 5, 7, 11, 13, ... up to sqrt(number)
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
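
# Quick sanity checks, worked by hand:
#   is_prime(25) -> False  (the trial-division loop starts at 5 and 25 % 5 == 0)
#   is_prime(29) -> True   (only divisors 5 and 7 are tried; neither divides 29)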

def prime_generator() -> Iterator[int]:
    """Yield the primes in increasing order: 2, 3, 5, 7, ..."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n (Project Euler problem 10)."""
    return sum(takewhile(lambda x: x < n, prime_generator()))


if __name__ == "__main__":
    print(f"{solution() = }")

# ---------------------------------------------------------------------------

import argparse
import math
import os

import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m",
        "--pretrained_model_name_or_path",
        type=str,
        default=None,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument(
        "-c",
        "--caption",
        type=str,
        default="robotic cat with wings",
        help="Text used to generate images.",
    )
    parser.add_argument(
        "-n",
        "--images_num",
        type=int,
        default=4,
        help="How many images to generate.",
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        default=42,
        help="Seed for random process.",
    )
    parser.add_argument(
        "-ci",
        "--cuda_id",
        type=int,
        default=0,
        help="cuda_id.",
    )
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
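
# Example: four 64x64 images with rows=2, cols=2 yield a 128x128 grid; the paste
# boxes are (0, 0), (64, 0), (0, 64) and (64, 64) in enumeration order.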

def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the INT8 UNet quantized by Intel Neural Compressor, if available
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))

# ---------------------------------------------------------------------------

from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Evenly split `n_layers` attention blocks across the given devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
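
# Usage example, worked by hand: splitting a 12-layer model across three devices
#   get_device_map(12, [0, 1, 2])
#   -> {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9, 10, 11]}
# ceil(12 / 3) = 4 layers per device, and the result passes assert_device_map.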

# ---------------------------------------------------------------------------

from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    def _check_no_duplicates_on_constructed_node(self, node):
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)
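
# Example: for a README that starts with a metadata block,
#   _split_yaml_from_readme("---\nlanguage: en\n---\n# Title")
#   -> ("language: en", "# Title")
# while a README without a leading "---" comes back unchanged with a None yaml block.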
class DatasetMetadata(dict):
    _FIELDS_WITH_DASHES = {'train_eval_index'}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path):
        with open(path, encoding='utf-8') as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        if path.exists():
            with open(path, encoding='utf-8') as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        updated_readme_content = self._to_readme(readme_content)
        with open(path, 'w', encoding='utf-8') as readme_file:
            readme_file.write(updated_readme_content)

    def _to_readme(self, readme_content=None) -> str:
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = '---\n' + self.to_yaml_string() + '---\n' + content
        else:
            full_content = '---\n' + self.to_yaml_string() + '---\n'
        return full_content

    @classmethod
    def from_yaml_string(cls, string):
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace('-', '_') if key.replace('-', '_') in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        return yaml.safe_dump(
            {
                (key.replace('_', '-') if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding='utf-8',
        ).decode('utf-8')
UpperCAmelCase : List[Any] = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage='Validate the yaml metadata block of a README.md file.')
    ap.add_argument('readme_filepath')
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 239
| 0
|
'''simple docstring'''
import operator as op
UpperCamelCase_ = '''scaler.pt'''
UpperCamelCase_ = '''pytorch_model'''
UpperCamelCase_ = '''random_states'''
UpperCamelCase_ = '''optimizer'''
UpperCamelCase_ = '''scheduler'''
UpperCamelCase_ = '''pytorch_model.bin'''
UpperCamelCase_ = '''pytorch_model.bin.index.json'''
UpperCamelCase_ = '''model.safetensors'''
UpperCamelCase_ = '''model.safetensors.index.json'''
UpperCamelCase_ = '''1.10.2'''
UpperCamelCase_ = '''py38'''
UpperCamelCase_ = '''4.17.0'''
UpperCamelCase_ = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
UpperCamelCase_ = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
UpperCamelCase_ = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
UpperCamelCase_ = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
UpperCamelCase_ = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
UpperCamelCase_ = '''2.0.1'''
UpperCamelCase_ = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
UpperCamelCase_ = ['''default''', '''reduce-overhead''', '''max-autotune''']
UpperCamelCase_ = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
UpperCamelCase_ = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
UpperCamelCase_ = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
UpperCamelCase_ = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 320
|
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Creates n-grams (contiguous character windows of size ngram_size) from the given sentence."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
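

# --- Added worked example (not in the original file) ---
# Every window of length `ngram_size` is returned, in order of appearance.
if __name__ == "__main__":
    assert create_ngram("abcde", 3) == ["abc", "bcd", "cde"]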
if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 320
| 1
|
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch

    from transformers import (
        MODEL_FOR_PRETRAINING_MAPPING,
        AlbertForMaskedLM,
        AlbertForMultipleChoice,
        AlbertForPreTraining,
        AlbertForQuestionAnswering,
        AlbertForSequenceClassification,
        AlbertForTokenClassification,
        AlbertModel,
    )
    from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            num_hidden_groups=self.num_hidden_groups,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            sentence_order_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.sop_logits.shape, (self.batch_size, config.num_labels))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': AlbertModel,
            'fill-mask': AlbertForMaskedLM,
            'question-answering': AlbertForQuestionAnswering,
            'text-classification': AlbertForSequenceClassification,
            'token-classification': AlbertForTokenClassification,
            'zero-shot': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    # special case for ForPreTraining model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained('albert-base-v2')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 98
|
from __future__ import annotations
import requests
def get_hackernews_story(story_id: int) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    """Get the top max_stories posts from HackerNews - https://news.ycombinator.com/"""
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
| 194
| 0
|
"""simple docstring"""
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def str_eval(s: str) -> int:
    """Returns the product of the digits in the given string."""
    product = 1
    for digit in s:
        product *= int(digit)
    return product


def solution(n: str = N) -> int:
    """Finds the thirteen adjacent digits of n with the greatest product."""
    largest_product = -sys.maxsize - 1
    substr = n[:13]
    cur_index = 13
    while cur_index < len(n) - 13:
        if int(n[cur_index]) >= int(substr[0]):
            substr = substr[1:] + n[cur_index]
            cur_index += 1
        else:
            largest_product = max(largest_product, str_eval(substr))
            substr = n[cur_index : cur_index + 13]
            cur_index += 13
    return largest_product


if __name__ == "__main__":
    print(f'{solution() = }')
| 173
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100_000,
        hidden_size=4_096,
        intermediate_size=11_008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2_048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
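

# --- Added usage sketch (illustrative; not part of the original file) ---
# A well-formed rope_scaling dict passes validation; a factor <= 1.0 would
# raise ValueError in `_rope_scaling_validation` above.
if __name__ == "__main__":
    OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # validates fine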
| 173
| 1
|
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Counts the nodes in the subtree rooted at start and records nodes whose
    subtree has an even number of vertices (edges that can be cut)."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    dfs(1)


if __name__ == "__main__":
    n, m = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
    for u, v in edges:
        tree[u].append(v)
        tree[v].append(u)
    even_tree()
    print(len(cuts) - 1)
| 49
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = """
Human: <<task>>
Assistant: """


DEFAULT_PROMPTS_REPO = "huggingface-tools/default-prompts"
PROMPT_FILES = {"chat": "chat_prompt_template.txt", "run": "run_prompt_template.txt"}


def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """
    Downloads and caches the prompt from a repo and returns it contents (if necessary).
    """
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
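

# --- Added usage sketch (illustrative; not part of the original module) ---
# A string containing whitespace is treated as the prompt itself and returned
# verbatim, so this call needs no network access; the agent name is arbitrary.
if __name__ == "__main__":
    prompt = download_prompt("Answer the question: <<task>>", agent_name="demo-agent")
    assert prompt == "Answer the question: <<task>>"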
| 184
| 0
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
    import torch

logger = logging.get_logger(__name__)
@dataclass
class BitsAndBytesConfig:
    def __init__(
        self,
        load_in_8bit=False,
        load_in_4bit=False,
        llm_int8_threshold=6.0,
        llm_int8_skip_modules=None,
        llm_int8_enable_fp32_cpu_offload=False,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=None,
        bnb_4bit_quant_type="fp4",
        bnb_4bit_use_double_quant=False,
        **kwargs,
    ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant

        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype, str):
            self.bnb_4bit_compute_dtype = getattr(torch, bnb_4bit_compute_dtype)
        elif isinstance(bnb_4bit_compute_dtype, torch.dtype):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype')

        self.post_init()

    def post_init(self):
        """Safety checker that the arguments are of the expected types."""
        if not isinstance(self.llm_int8_threshold, float):
            raise ValueError('llm_int8_threshold must be a float')

        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list):
            raise ValueError('llm_int8_skip_modules must be a list of strings')
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool):
            raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean')
        if not isinstance(self.llm_int8_has_fp16_weight, bool):
            raise ValueError('llm_int8_has_fp16_weight must be a boolean')
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype):
            raise ValueError('bnb_4bit_compute_dtype must be torch.dtype')
        if not isinstance(self.bnb_4bit_quant_type, str):
            raise ValueError('bnb_4bit_quant_type must be a string')
        if not isinstance(self.bnb_4bit_use_double_quant, bool):
            raise ValueError('bnb_4bit_use_double_quant must be a boolean')

        if self.load_in_4bit and not version.parse(importlib.metadata.version('bitsandbytes')) >= version.parse(
            '0.39.0'
        ):
            raise ValueError(
                '4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version')

    def is_quantizable(self):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method(self):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict(cls, config_dict, return_unused_kwargs, **kwargs):
        config = cls(**config_dict)

        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config, key):
                setattr(config, key, value)
                to_remove.append(key)
        for key in to_remove:
            kwargs.pop(key, None)

        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file(self, json_file_path):
        with open(json_file_path, 'w', encoding='utf-8') as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict, indent=2, sort_keys=True) + '\n'
            writer.write(json_string)

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output['bnb_4bit_compute_dtype'] = str(output['bnb_4bit_compute_dtype']).split('.')[1]
        return output

    def __repr__(self):
        return f'''{self.__class__.__name__} {self.to_json_string()}'''

    def to_json_string(self, use_diff=True) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"

    def to_diff_dict(self) -> Dict[str, Any]:
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()

        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value

        return serializable_config_dict
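

# --- Added usage sketch (illustrative; not part of the original module) ---
# Assumes torch and bitsandbytes>=0.39.0 are installed: builds a 4-bit config
# and inspects the quantization method derived from its fields.
if __name__ == "__main__":
    cfg = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")
    assert cfg.is_quantizable()
    assert cfg.quantization_method() == "nf4"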
| 536
|
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
    import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING)
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework)
            generate_kwargs["prefix_length"] = prefix_inputs['input_ids'].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
                    ' [None, \'hole\']')
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
            if return_tensors is not None:
                raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
                    ' the stop sequence will be used as the stop sequence string in the interim.')
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({'add_space_before_punct_symbol': True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)

    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework)
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs['input_ids'].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs['max_new_tokens']
            else:
                new_tokens = generate_kwargs.get('max_length', self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError('We cannot infer how many new tokens are expected')
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
                        ' models max length')

                inputs["input_ids"] = inputs['input_ids'][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs['attention_mask'][:, -keep_length:]

        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs['input_ids']
        attention_mask = model_inputs.get('attention_mask', None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop('prompt_text')

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop('prefix_length', 0)
        if prefix_length > 0:
            has_max_new_tokens = 'max_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get('max_length') or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = 'min_new_tokens' in generate_kwargs or (
                'generation_config' in generate_kwargs
                and generate_kwargs['generation_config'].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}

    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs['generated_sequence'][0]
        input_ids = model_outputs['input_ids']
        prompt_text = model_outputs['prompt_text']
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {'generated_token_ids': sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {'generated_text': all_text}
            records.append(record)

        return records
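

# --- Added usage sketch (illustrative; not part of the original module) ---
# Assumes the `pipeline` factory and a small causal LM checkpoint ("gpt2",
# downloaded on first use) are available:
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="gpt2")
    print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])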
| 536
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pegasus-large": "https://huggingface.co/google/pegasus-large/resolve/main/config.json",
    # See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}


class PegasusConfig(PretrainedConfig):
    model_type = 'pegasus'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}

    def __init__(
        self,
        vocab_size=5_0_2_6_5,
        max_position_embeddings=1_0_2_4,
        encoder_layers=1_2,
        encoder_ffn_dim=4_0_9_6,
        encoder_attention_heads=1_6,
        decoder_layers=1_2,
        decoder_ffn_dim=4_0_9_6,
        decoder_attention_heads=1_6,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=1_0_2_4,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=0,
        scale_embedding=False,
        pad_token_id=0,
        eos_token_id=1,
        forced_eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model
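

# --- Added check (illustrative; not part of the original file) ---
# attribute_map routes `hidden_size` to `d_model` and `num_attention_heads`
# to the encoder attention heads:
if __name__ == "__main__":
    cfg = PegasusConfig(d_model=512, encoder_attention_heads=8)
    assert cfg.hidden_size == 512 and cfg.num_attention_heads == 8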
| 133
|
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print('Initial List')
    print(*arr)
    arr = odd_even_transposition(arr)
    print('Sorted List\n')
    print(*arr)


if __name__ == "__main__":
    main()
| 133
| 1
|
"""simple docstring"""
import unittest
from transformers import GPTSw3Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_with_bytefallback.model")


@require_sentencepiece
@require_tokenizers
class GPTSw3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSw3Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    test_sentencepiece_ignore_case = False

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB, eos_token="<unk>", bos_token="<unk>", pad_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a test"
        output_text = "This is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 2000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 2000)

    def test_full_tokenizer(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [465, 287, 265, 631, 842])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."],
        )
        # fmt: on

        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens, ["▁I", "▁was", "▁bor", "n", "▁in", "▁", "<0x39>", "2", "0", "0", "0", ",", "▁and", "▁this", "▁is", "▁f", "al", "s", "<0xC3>", "<0xA9>", "."]
        )
        # fmt: on

    def test_fast_encode_decode(self):
        tokenizer = GPTSw3Tokenizer(SAMPLE_VOCAB)
        texts = ["This is a test", "I was born in 92000, and this is falsé."]
        expected_ids_list = [
            [465, 287, 265, 631, 842],
            [262, 272, 1525, 286, 271, 268, 60, 916, 633, 633, 633, 259, 266, 301, 287, 384, 367, 263, 198, 172, 260],
        ]

        # Test that encode_fast returns the same as tokenize + convert_tokens_to_ids
        for text, expected_ids in zip(texts, expected_ids_list):
            self.assertListEqual(tokenizer.encode_fast(text), expected_ids)

        # Test that decode_fast returns the input text
        for text, token_ids in zip(texts, expected_ids_list):
            self.assertEqual(tokenizer.decode_fast(token_ids), text)

    @slow
    def test_tokenizer_integration(self):
        sequences = [
            "<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')",
            "Hey there, how are you doing this fine day?",
            "This is a text with a trailing spaces followed by a dot .",
            "Häj sväjs lillebrör! =)",
            "Det är inget fel på Mr. Cool",
        ]
        # fmt: off
        expected_encoding = {"input_ids": [[63423, 5, 6811, 14954, 282, 816, 3821, 63466, 63425, 63462, 18, 63978, 678, 301, 1320, 63423, 63455, 63458, 18, 63982, 4246, 3940, 1901, 47789, 5547, 18994], [19630, 1100, 63446, 1342, 633, 544, 4488, 593, 5102, 2416, 63495, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1652, 428, 268, 1936, 515, 268, 58593, 22413, 9106, 546, 268, 33213, 63979, 698, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [55130, 63450, 924, 63449, 2249, 4062, 1558, 318, 63504, 21498, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [509, 377, 2827, 2559, 332, 6575, 63443, 26801, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="AI-Sweden/gpt-sw3-126m",
            sequences=sequences,
        )
| 701
|
"""simple docstring"""
from __future__ import annotations
def longest_subsequence(array: list[int]) -> list[int]:  # This function is recursive
    """Returns a longest non-decreasing subsequence of the given array."""
    array_length = len(array)
    # If the array contains only one element, we return it (it's the stop condition of
    # recursion)
    if array_length <= 1:
        return array
    # Else
    pivot = array[0]
    is_found = False
    i = 1
    longest_subseq: list[int] = []
    while not is_found and i < array_length:
        if array[i] < pivot:
            is_found = True
            temp_array = [element for element in array[i:] if element >= array[i]]
            temp_array = longest_subsequence(temp_array)
            if len(temp_array) > len(longest_subseq):
                longest_subseq = temp_array
        else:
            i += 1

    temp_array = [element for element in array[1:] if element >= pivot]
    temp_array = [pivot, *longest_subsequence(temp_array)]
    if len(temp_array) > len(longest_subseq):
        return temp_array
    else:
        return longest_subseq
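

# --- Added worked example (not in the original file; traced by hand) ---
# For [1, 3, 2, 4] the pivot-based recursion keeps [1, 2, 4], dropping the 3
# because the tail starting at 2 yields the longer non-decreasing chain.
if __name__ == "__main__":
    assert longest_subsequence([1, 3, 2, 4]) == [1, 2, 4]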
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 85
| 0
|
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
SAMPLE_BPE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')

FRAMEWORK = '''pt''' if is_torch_available() else '''tf'''


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = """I was born in 92000, and this is falsé."""

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """I was born in 92000, and this is falsé."""

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {"""input_ids""": [[5, 54, 7_196, 297, 30, 23, 776, 18, 11, 3_215, 3_705, 8_252, 22, 3_164, 1_181, 2_116, 29, 16, 813, 25, 791, 3_314, 20, 3_446, 38, 27_575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9_088, 20, 1_517, 8, 22_804, 18_818, 10, 38, 629, 607, 607, 142, 19, 7_196, 867, 56, 10_326, 24, 2_267, 20, 416, 5_072, 15_612, 233, 734, 7, 2_399, 27, 16, 3_015, 1_649, 7, 24, 20, 4_338, 2_399, 27, 13, 3_400, 14, 13, 6_189, 8, 930, 9, 6]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            """Le transformeur est un modèle d'apprentissage profond introduit en 2017, """
            """utilisé principalement dans le domaine du traitement automatique des langues (TAL).""",
            """À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus """
            """pour gérer des données séquentielles, telles que le langage naturel, pour des tâches """
            """telles que la traduction et la synthèse de texte.""",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
| 403
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False


@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()


@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
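
# Quick illustration of the chained-URL convention exercised by test_fs_isfile above
# (the archive path is hypothetical): a member file inside an archive can be opened
# directly through fsspec URL chaining.
#   with fsspec.open("zip://dataset.jsonl::/path/to/archive.zip", "rt") as f:
#       first_line = f.readline()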
from heapq import heappop, heappush
import numpy as np
def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float | int, list[tuple[int, int]]]:
    """Shortest path on a binary grid (cells equal to 1 are traversable)."""
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                # push only reachable cells that improve on the known distance
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
if __name__ == "__main__":
    import doctest

    doctest.testmod()
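
    # Minimal usage sketch (grid values are illustrative): find a shortest
    # 4-connected path between two corners of a small binary grid.
    example_grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
    distance, path = dijkstra(example_grid, (0, 0), (2, 2), allow_diagonal=False)
    print(f"distance={distance}, path={path}")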
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"
def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    """
    Copy/paste/tweak roberta's weights to our BERT structure.
    """
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
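
# Example invocation (script name and paths are illustrative, not from this file):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlmr.xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-converted \
#       --classification_head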
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/deit-base-distilled-patch16-224": (
        "https://huggingface.co/facebook/deit-base-distilled-patch16-224/resolve/main/config.json"
    ),
    # See all DeiT models at https://huggingface.co/models?filter=deit
}
class DeiTConfig(PretrainedConfig):
    model_type = "deit"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride


class DeiTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
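
# Minimal usage sketch (kept as a comment so the module has no import side effects):
# build a default configuration and inspect the ONNX input spec declared above.
#   config = DeiTConfig()
#   onnx_config = DeiTOnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict([("pixel_values", {0: "batch", ...})])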
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
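
# Hypothetical usage sketch (the SparkSession setup is illustrative and not part of
# this module):
#   from pyspark.sql import SparkSession
#   spark = SparkSession.builder.getOrCreate()
#   df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
#   ds = SparkDatasetReader(df, streaming=False).read()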
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # split the fused qkv projection into separate query/key/value weights
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
            if "weight" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
            elif "bias" in key:
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
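
# Example invocation (script name and output path are illustrative; the base
# checkpoint URL above is the default):
#   python convert_vit_mae_to_pytorch.py --pytorch_dump_folder_path ./vit-mae-base-converted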
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20
    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    pred = model.predict(x_test)
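
    # Quick follow-up sketch (an addition, not part of the original script): report
    # the test RMSE on the scaled values to get a rough sense of fit quality.
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"test RMSE (scaled): {rmse:.4f}")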
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = "true"
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
        # gather across processes, trimming any duplicated samples from padding
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
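
# This script is meant to be launched across multiple processes, e.g. (command and
# script name are illustrative):
#   accelerate launch --num_processes 2 test_metrics.py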
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
    import tensorflow as tf

    from transformers import (
        TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
        TFTransfoXLForSequenceClassification,
        TFTransfoXLLMHeadModel,
        TFTransfoXLModel,
    )
class TFTransfoXLModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01
    def prepare_config_and_inputs(self):
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )

        return (config, input_ids_1, input_ids_2, lm_labels)
    def set_seed(self):
        random.seed(self.seed)
        tf.random.set_seed(self.seed)
    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLModel(config)

        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1}

        hidden_states_2, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLLMHeadModel(config)

        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()

        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()

        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()

        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()

        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        # TODO JP: Make TransfoXL XLA compliant
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_keras_fit(self):
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,11739,4762,358,5,25,245,22,1706,17,20098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,6224,831,16002,2,8,603,78967,29546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,29546,54,8,3609,5,57211,49,4,1,277,18,8,1755,15691,3,341,25,416,693,42573,71,17,401,94,31,17919,2,29546,7873,18,1,435,23,11011,755,5,5167,3,7983,98,84,2,29546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,29546,824,1400,1868,2,19,160,2,311,8,5496,2,20920,17,25,15097,3,24,24,0,33,1,1857,2,1,1009,4,1109,11739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,71477,20098,104447,2,20961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
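
# Each test above shells out through `accelerate launch` with the config written in
# setUpClass; an equivalent manual run of one example would look like (paths are
# illustrative):
#   accelerate launch examples/pytorch/text-classification/run_glue_no_trainer.py \
#       --model_name_or_path distilbert-base-uncased --output_dir /tmp/glue ...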
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
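
# Note: both tests replicate the parameters and shard the prompt across every visible
# device, so the pipeline output carries a leading device axis of size
# jax.device_count() before being reshaped back into a flat batch of images.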
"""simple docstring"""
import numpy as np
def lowerCAmelCase_ ( lowercase_ : np.ndarray , lowercase_ : np.ndarray , lowercase_ : float = 1E-1_2 , lowercase_ : int = 100 , ):
'''simple docstring'''
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
__SCREAMING_SNAKE_CASE : Any = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = 0
__SCREAMING_SNAKE_CASE : Any = 0
__SCREAMING_SNAKE_CASE : Any = 1E1_2
while not convergence:
# Multiple matrix by the vector.
__SCREAMING_SNAKE_CASE : int = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
__SCREAMING_SNAKE_CASE : int = w / np.linalg.norm(lowercase_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
__SCREAMING_SNAKE_CASE : Optional[Any] = vector.conj().T if is_complex else vector.T
__SCREAMING_SNAKE_CASE : Optional[int] = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
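# Sanity note on the shortcut above: the full Rayleigh quotient is
# lambda = (v^H A v) / (v^H v); since the vector was normalized on the
# previous step, v^H v == 1 and the denominator can be dropped.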
# Check convergence.
__SCREAMING_SNAKE_CASE : int = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
__SCREAMING_SNAKE_CASE : Optional[int] = True
__SCREAMING_SNAKE_CASE : str = lambda_
if is_complex:
__SCREAMING_SNAKE_CASE : Optional[int] = np.real(lambda_ )
return lambda_, vector
def lowerCAmelCase_ ( ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
__SCREAMING_SNAKE_CASE : Optional[Any] = np.array([41, 4, 20] )
__SCREAMING_SNAKE_CASE : Dict = real_input_matrix.astype(np.complex128 )
__SCREAMING_SNAKE_CASE : List[str] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
__SCREAMING_SNAKE_CASE : int = np.array([41, 4, 20] ).astype(np.complex128 )
for problem_type in ["real", "complex"]:
if problem_type == "real":
__SCREAMING_SNAKE_CASE : int = real_input_matrix
__SCREAMING_SNAKE_CASE : str = real_vector
elif problem_type == "complex":
__SCREAMING_SNAKE_CASE : Dict = complex_input_matrix
__SCREAMING_SNAKE_CASE : Optional[int] = complex_vector
# Our implementation.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = power_iteration(lowercase_ , lowercase_ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermitian matrices).
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = np.linalg.eigh(lowercase_ )
# Last eigenvalue is the maximum one.
__SCREAMING_SNAKE_CASE : int = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
__SCREAMING_SNAKE_CASE : Dict = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowercase_ ) - np.abs(lowercase_ ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 674
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_lowerCamelCase = {
'''configuration_gpt_neox_japanese''': ['''GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoXJapaneseConfig'''],
'''tokenization_gpt_neox_japanese''': ['''GPTNeoXJapaneseTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXJapaneseForCausalLM''',
'''GPTNeoXJapaneseLayer''',
'''GPTNeoXJapaneseModel''',
'''GPTNeoXJapanesePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
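# Illustrative note on the `_LazyModule` pattern above (not part of the module):
# nothing heavy is imported until an attribute is first accessed, e.g.
#
#   from transformers.models.gpt_neox_japanese import GPTNeoXJapaneseConfig
#
# only then is "GPTNeoXJapaneseConfig" looked up in `_import_structure` and the
# `configuration_gpt_neox_japanese` submodule actually imported. The
# `TYPE_CHECKING` branch exists purely so static type checkers see the real symbols.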
| 674
| 1
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_lowerCAmelCase : Optional[int] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class A_ ( unittest.TestCase ):
lowerCAmelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCAmelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCAmelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : List[Any] = pipeline(
task="text-classification" ,model="hf-internal-testing/tiny-random-distilbert" ,framework="pt" )
_lowerCamelCase : Tuple = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "LABEL_0", "score": 0.5_04}] )
_lowerCamelCase : Any = text_classifier("This is great !" ,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}] )
_lowerCamelCase : List[str] = text_classifier(["This is great !", "This is bad"] ,top_k=2 )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] ,)
_lowerCamelCase : Dict = text_classifier("This is great !" ,top_k=1 )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "LABEL_0", "score": 0.5_04}] )
# Legacy behavior
_lowerCamelCase : int = text_classifier("This is great !" ,return_all_scores=__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "LABEL_0", "score": 0.5_04}] )
_lowerCamelCase : List[Any] = text_classifier("This is great !" ,return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}]] )
_lowerCamelCase : Optional[int] = text_classifier(["This is great !", "Something else"] ,return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
[{"label": "LABEL_0", "score": 0.5_04}, {"label": "LABEL_1", "score": 0.4_96}],
] ,)
_lowerCamelCase : Tuple = text_classifier(["This is great !", "Something else"] ,return_all_scores=__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[
{"label": "LABEL_0", "score": 0.5_04},
{"label": "LABEL_0", "score": 0.5_04},
] ,)
@require_torch
def _lowercase ( self: str ):
'''simple docstring'''
import torch
_lowerCamelCase : str = pipeline(
task="text-classification" ,model="hf-internal-testing/tiny-random-distilbert" ,framework="pt" ,device=torch.device("cpu" ) ,)
_lowerCamelCase : List[str] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "LABEL_0", "score": 0.5_04}] )
@require_tf
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : List[str] = pipeline(
task="text-classification" ,model="hf-internal-testing/tiny-random-distilbert" ,framework="tf" )
_lowerCamelCase : Optional[Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "LABEL_0", "score": 0.5_04}] )
@slow
@require_torch
def _lowercase ( self: str ):
'''simple docstring'''
_lowerCamelCase : str = pipeline("text-classification" )
_lowerCamelCase : Union[str, Any] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "POSITIVE", "score": 1.0}] )
_lowerCamelCase : Any = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "NEGATIVE", "score": 1.0}] )
_lowerCamelCase : str = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "POSITIVE", "score": 0.9_88}] )
@slow
@require_tf
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : List[str] = pipeline("text-classification" ,framework="tf" )
_lowerCamelCase : Optional[int] = text_classifier("This is great !" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "POSITIVE", "score": 1.0}] )
_lowerCamelCase : List[Any] = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "NEGATIVE", "score": 1.0}] )
_lowerCamelCase : Dict = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": "POSITIVE", "score": 0.9_88}] )
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: List[str] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = TextClassificationPipeline(model=__lowerCAmelCase ,tokenizer=__lowerCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Any ,__lowerCAmelCase: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_lowerCamelCase : Optional[int] = "HuggingFace is in"
_lowerCamelCase : Optional[int] = text_classifier(__lowerCAmelCase )
self.assertEqual(nested_simplify(__lowerCAmelCase ) ,[{"label": ANY(__lowerCAmelCase ), "score": ANY(__lowerCAmelCase )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
_lowerCamelCase : Optional[int] = ["HuggingFace is in ", "Paris is in France"]
_lowerCamelCase : Any = text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[{"label": ANY(__lowerCAmelCase ), "score": ANY(__lowerCAmelCase )}, {"label": ANY(__lowerCAmelCase ), "score": ANY(__lowerCAmelCase )}] ,)
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_lowerCamelCase : Tuple = text_classifier(__lowerCAmelCase ,top_k=__lowerCAmelCase )
_lowerCamelCase : Tuple = len(model.config.id2label.values() )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[[{"label": ANY(__lowerCAmelCase ), "score": ANY(__lowerCAmelCase )}] * N, [{"label": ANY(__lowerCAmelCase ), "score": ANY(__lowerCAmelCase )}] * N] ,)
_lowerCamelCase : Optional[Any] = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
_lowerCamelCase : Union[str, Any] = text_classifier(__lowerCAmelCase )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,{"label": ANY(__lowerCAmelCase ), "score": ANY(__lowerCAmelCase )} ,)
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_lowerCamelCase : Optional[Any] = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(__lowerCAmelCase ):
text_classifier(__lowerCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_lowerCamelCase : Dict = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(__lowerCAmelCase ) ,[{"label": ANY(__lowerCAmelCase ), "score": ANY(__lowerCAmelCase )}] ,)
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 386
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
def __init__( self: Any ,__lowerCAmelCase: int ,__lowerCAmelCase: Optional[Any]=13 ,__lowerCAmelCase: List[str]=3 ,__lowerCAmelCase: Optional[Any]=224 ,__lowerCAmelCase: Optional[int]=30 ,__lowerCAmelCase: Union[str, Any]=400 ,__lowerCAmelCase: List[Any]=True ,__lowerCAmelCase: Any=None ,__lowerCAmelCase: str=True ,__lowerCAmelCase: Union[str, Any]=[0.5, 0.5, 0.5] ,__lowerCAmelCase: Tuple=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = size if size is not None else {"height": 18, "width": 18}
_lowerCamelCase : Tuple = parent
_lowerCamelCase : List[str] = batch_size
_lowerCamelCase : Any = num_channels
_lowerCamelCase : Union[str, Any] = image_size
_lowerCamelCase : Optional[int] = min_resolution
_lowerCamelCase : List[str] = max_resolution
_lowerCamelCase : int = do_resize
_lowerCamelCase : Dict = size
_lowerCamelCase : Optional[int] = do_normalize
_lowerCamelCase : int = image_mean
_lowerCamelCase : Tuple = image_std
def _lowercase ( self: Optional[Any] ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class A_ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowerCAmelCase__ = ViTImageProcessor if is_vision_available() else None
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = EfficientFormerImageProcessorTester(self )
@property
def _lowercase ( self: Tuple ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_mean" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"image_std" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_normalize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"do_resize" ) )
self.assertTrue(hasattr(__lowerCAmelCase ,"size" ) )
def _lowercase ( self: List[Any] ):
'''simple docstring'''
pass
def _lowercase ( self: Dict ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCamelCase : Dict = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,Image.Image )
# Test not batched input
_lowerCamelCase : Dict = image_processor(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
# Test batched
_lowerCamelCase : Optional[Any] = image_processor(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
def _lowercase ( self: Tuple ):
'''simple docstring'''
_lowerCamelCase : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCamelCase : str = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCAmelCase ,numpify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,np.ndarray )
# Test not batched input
_lowerCamelCase : List[Any] = image_processor(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
# Test batched
_lowerCamelCase : Dict = image_processor(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
def _lowercase ( self: int ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCamelCase : int = prepare_image_inputs(self.image_proc_tester ,equal_resolution=__lowerCAmelCase ,torchify=__lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCAmelCase ,torch.Tensor )
# Test not batched input
_lowerCamelCase : int = image_processor(image_inputs[0] ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
# Test batched
_lowerCamelCase : Tuple = image_processor(__lowerCAmelCase ,return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) ,)
| 386
| 1
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class a__ ( unittest.TestCase ):
def __magic_name__ ( self ):
lowercase : Dict = tempfile.mkdtemp()
lowercase : Union[str, Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowercase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
lowercase : Optional[int] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
lowercase : Optional[int] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(_a , _a )
def __magic_name__ ( self , **_a ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def __magic_name__ ( self , **_a ):
return BertTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __magic_name__ ( self , **_a ):
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **_a )
def __magic_name__ ( self ):
shutil.rmtree(self.tmpdirname )
def __magic_name__ ( self ):
lowercase : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
lowercase : List[str] = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __magic_name__ ( self ):
lowercase : Tuple = self.get_tokenizer()
lowercase : List[str] = self.get_rust_tokenizer()
lowercase : Tuple = self.get_image_processor()
lowercase : Optional[int] = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_slow.save_pretrained(self.tmpdirname )
lowercase : List[str] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=_a )
lowercase : Optional[Any] = AlignProcessor(tokenizer=_a , image_processor=_a )
processor_fast.save_pretrained(self.tmpdirname )
lowercase : int = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _a )
self.assertIsInstance(processor_fast.tokenizer , _a )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _a )
self.assertIsInstance(processor_fast.image_processor , _a )
def __magic_name__ ( self ):
lowercase : Dict = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowercase : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase : Dict = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
lowercase : List[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _a )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def __magic_name__ ( self ):
lowercase : int = self.get_image_processor()
lowercase : List[Any] = self.get_tokenizer()
lowercase : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase : List[str] = self.prepare_image_inputs()
lowercase : List[Any] = image_processor(_a , return_tensors="np" )
lowercase : str = processor(images=_a , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __magic_name__ ( self ):
lowercase : Tuple = self.get_image_processor()
lowercase : List[Any] = self.get_tokenizer()
lowercase : int = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase : Any = "lower newer"
lowercase : Optional[Any] = processor(text=_a )
lowercase : Optional[int] = tokenizer(_a , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.get_image_processor()
lowercase : Union[str, Any] = self.get_tokenizer()
lowercase : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase : List[str] = "lower newer"
lowercase : Dict = self.prepare_image_inputs()
lowercase : int = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_a ):
processor()
def __magic_name__ ( self ):
lowercase : Optional[Any] = self.get_image_processor()
lowercase : List[Any] = self.get_tokenizer()
lowercase : str = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowercase : Optional[Any] = processor.batch_decode(_a )
lowercase : Tuple = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.get_image_processor()
lowercase : Any = self.get_tokenizer()
lowercase : Tuple = AlignProcessor(tokenizer=_a , image_processor=_a )
lowercase : List[Any] = "lower newer"
lowercase : List[str] = self.prepare_image_inputs()
lowercase : int = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 361
|
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a__ ( a_ ):
__lowerCAmelCase = (DDPMScheduler,)
def __magic_name__ ( self , **_a ):
lowercase : Dict = {
"num_train_timesteps": 1_000,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**_a )
return config
def __magic_name__ ( self ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=_a )
def __magic_name__ ( self ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=_a , beta_end=_a )
def __magic_name__ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_a )
def __magic_name__ ( self ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_a )
def __magic_name__ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_a )
def __magic_name__ ( self ):
self.check_over_configs(thresholding=_a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_a , prediction_type=_a , sample_max_value=_a , )
def __magic_name__ ( self ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_a )
def __magic_name__ ( self ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=_a )
def __magic_name__ ( self ):
lowercase : Union[str, Any] = self.scheduler_classes[0]
lowercase : Any = self.get_scheduler_config()
lowercase : Dict = scheduler_class(**_a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
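# For reference (not part of the original test): with variance_type
# "fixed_small" the scheduler returns the DDPM posterior variance
#     beta_tilde_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t,
# which is 0 at t=0 and approaches beta_end (0.02 for the linear schedule
# configured above) as t nears 999, matching the three asserts.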
def __magic_name__ ( self ):
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config()
lowercase : Dict = scheduler_class(**_a )
lowercase : Dict = len(_a )
lowercase : str = self.dummy_model()
lowercase : Optional[int] = self.dummy_sample_deter
lowercase : List[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowercase : Optional[Any] = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowercase : str = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase : List[str] = pred_prev_sample
lowercase : Dict = torch.sum(torch.abs(_a ) )
lowercase : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def __magic_name__ ( self ):
lowercase : Optional[int] = self.scheduler_classes[0]
lowercase : Any = self.get_scheduler_config(prediction_type="v_prediction" )
lowercase : int = scheduler_class(**_a )
lowercase : str = len(_a )
lowercase : Optional[int] = self.dummy_model()
lowercase : List[str] = self.dummy_sample_deter
lowercase : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_a ) ):
# 1. predict noise residual
lowercase : Union[str, Any] = model(_a , _a )
# 2. predict previous mean of sample x_t-1
lowercase : Optional[Any] = scheduler.step(_a , _a , _a , generator=_a ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowercase : Dict = pred_prev_sample
lowercase : str = torch.sum(torch.abs(_a ) )
lowercase : Tuple = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def __magic_name__ ( self ):
lowercase : List[Any] = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config()
lowercase : Tuple = scheduler_class(**_a )
lowercase : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_a )
lowercase : Union[str, Any] = scheduler.timesteps
for i, timestep in enumerate(_a ):
if i == len(_a ) - 1:
lowercase : Any = -1
else:
lowercase : Union[str, Any] = timesteps[i + 1]
lowercase : Optional[int] = scheduler.previous_timestep(_a )
lowercase : Union[str, Any] = prev_t.item()
self.assertEqual(_a , _a )
def __magic_name__ ( self ):
lowercase : str = self.scheduler_classes[0]
lowercase : List[str] = self.get_scheduler_config()
lowercase : List[Any] = scheduler_class(**_a )
lowercase : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_a , msg="`custom_timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=_a )
def __magic_name__ ( self ):
lowercase : Dict = self.scheduler_classes[0]
lowercase : Union[str, Any] = self.get_scheduler_config()
lowercase : Any = scheduler_class(**_a )
lowercase : int = [100, 87, 50, 1, 0]
lowercase : Any = len(_a )
with self.assertRaises(_a , msg="Can only pass one of `num_inference_steps` or `custom_timesteps`." ):
scheduler.set_timesteps(num_inference_steps=_a , timesteps=_a )
def __magic_name__ ( self ):
lowercase : str = self.scheduler_classes[0]
lowercase : Tuple = self.get_scheduler_config()
lowercase : Optional[int] = scheduler_class(**_a )
lowercase : List[str] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=_a )
| 361
| 1
|
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__UpperCAmelCase ={
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__UpperCAmelCase ={"""facebook/blenderbot-3B""": 128}
class lowerCAmelCase__ ( UpperCAmelCase_ ):
lowercase__ : Optional[Any] = VOCAB_FILES_NAMES
lowercase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowercase__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase__ : List[Any] = ["""input_ids""", """attention_mask"""]
lowercase__ : Any = BlenderbotTokenizer
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="replace" , UpperCamelCase__="<s>" , UpperCamelCase__="</s>" , UpperCamelCase__="</s>" , UpperCamelCase__="<s>" , UpperCamelCase__="<unk>" , UpperCamelCase__="<pad>" , UpperCamelCase__="<mask>" , UpperCamelCase__=False , UpperCamelCase__=True , **UpperCamelCase__ , ):
'''simple docstring'''
super().__init__(
UpperCamelCase__ , UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , errors=UpperCamelCase__ , bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , add_prefix_space=UpperCamelCase__ , trim_offsets=UpperCamelCase__ , **UpperCamelCase__ , )
A__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase__ ) != add_prefix_space:
A__ = getattr(UpperCamelCase__ , pre_tok_state.pop("type" ) )
A__ = add_prefix_space
A__ = pre_tok_class(**UpperCamelCase__ )
A__ = add_prefix_space
A__ = "post_processor"
A__ = getattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
if tokenizer_component_instance:
A__ = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
A__ = tuple(state["sep"] )
if "cls" in state:
A__ = tuple(state["cls"] )
A__ = False
if state.get("add_prefix_space" , UpperCamelCase__ ) != add_prefix_space:
A__ = add_prefix_space
A__ = True
if state.get("trim_offsets" , UpperCamelCase__ ) != trim_offsets:
A__ = trim_offsets
A__ = True
if changes_to_apply:
A__ = getattr(UpperCamelCase__ , state.pop("type" ) )
A__ = component_class(**UpperCamelCase__ )
setattr(self.backend_tokenizer , UpperCamelCase__ , UpperCamelCase__ )
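# Reading aid (not part of the original): the fast tokenizer's pre_tokenizer and
# post_processor are Rust-backed objects that cannot be mutated in place, so the
# code above serializes each component to its JSON state, patches
# `add_prefix_space` / `trim_offsets` there, rebuilds the component from the
# patched state, and reattaches it to `backend_tokenizer`.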
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowercase_ ( self ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("Using mask_token, but it is not set yet." )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else value
A__ = value
def lowercase_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
A__ = kwargs.get("is_split_into_words" , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowercase_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ):
'''simple docstring'''
A__ = kwargs.get("is_split_into_words" , UpperCamelCase__ )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCamelCase__ , **UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
'''simple docstring'''
A__ = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
return tuple(UpperCamelCase__ )
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
'''simple docstring'''
A__ = [self.sep_token_id]
A__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , UpperCamelCase__ , UpperCamelCase__ = None ):
'''simple docstring'''
return token_ids_a + [self.eos_token_id]
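# Note (not part of the original): unlike RoBERTa-style tokenizers, Blenderbot
# prepends no BOS/CLS token; a single sequence is encoded simply as
# `tokens + [eos_token_id]`, as the one-liner above shows.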
def lowercase_ ( self , UpperCamelCase__ ):
'''simple docstring'''
A__ = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to prefix the text with a space, as is done inside Blenderbot
inputs.append(" " + text )
else:
# Generated responses already contain the space prefix.
inputs.append(UpperCamelCase__ )
A__ = " ".join(UpperCamelCase__ )
A__ = self.encode(UpperCamelCase__ )
if len(UpperCamelCase__ ) > self.model_max_length:
A__ = input_ids[-self.model_max_length :]
logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
return input_ids
| 261
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
__UpperCAmelCase ={
"""configuration_speech_to_text""": ["""SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Speech2TextConfig"""],
"""processing_speech_to_text""": ["""Speech2TextProcessor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""Speech2TextTokenizer"""]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =["""Speech2TextFeatureExtractor"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFSpeech2TextForConditionalGeneration""",
"""TFSpeech2TextModel""",
"""TFSpeech2TextPreTrainedModel""",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"""SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Speech2TextForConditionalGeneration""",
"""Speech2TextModel""",
"""Speech2TextPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeech2TextForConditionalGeneration,
TFSpeech2TextModel,
TFSpeech2TextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
Speech2TextForConditionalGeneration,
Speech2TextModel,
Speech2TextPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 261
| 1
|
"""simple docstring"""
lowerCamelCase_ = '''
# How to install Transformers
! pip install transformers datasets
# To install from source instead of the last release, comment out the command above and uncomment the one below.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCamelCase_ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCamelCase_ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 95
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCamelCase ( A__ ):
'''simple docstring'''
a_ : Optional[int] = ["""image_processor""", """tokenizer"""]
a_ : Union[str, Any] = """ViltImageProcessor"""
a_ : Dict = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] , a_ : Optional[int]=None , a_ : Optional[Any]=None , **a_ : str ):
lowerCAmelCase_ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a_ , )
lowerCAmelCase_ : Tuple = kwargs.pop("feature_extractor" )
lowerCAmelCase_ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a_ , a_ )
lowerCAmelCase_ : str = self.image_processor
def __call__( self : int , a_ : List[Any] , a_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a_ : bool = True , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Union[bool, str, TruncationStrategy] = None , a_ : Optional[int] = None , a_ : int = 0 , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : Optional[bool] = None , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = True , a_ : Optional[Union[str, TensorType]] = None , **a_ : Optional[Any] , ):
lowerCAmelCase_ : Dict = self.tokenizer(
text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_token_type_ids=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , )
# add pixel_values + pixel_mask
lowerCAmelCase_ : Tuple = self.image_processor(a_ , return_tensors=a_ )
encoding.update(a_ )
return encoding
def lowerCamelCase ( self : Union[str, Any] , *a_ : Dict , **a_ : Union[str, Any] ):
return self.tokenizer.batch_decode(*a_ , **a_ )
def lowerCamelCase ( self : Optional[Any] , *a_ : List[str] , **a_ : Any ):
return self.tokenizer.decode(*a_ , **a_ )
@property
def lowerCamelCase ( self : Dict ):
lowerCAmelCase_ : Tuple = self.tokenizer.model_input_names
lowerCAmelCase_ : Optional[int] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase ( self : Union[str, Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a_ , )
return self.image_processor_class
@property
def lowerCamelCase ( self : Optional[int] ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a_ , )
return self.image_processor
| 610
| 0
|
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
"""https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"""
"""position_salaries.csv"""
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)
def viz_polynomial() -> None:
plt.scatter(X , y , color="""red""" )
plt.plot(X , pol_reg.predict(poly_reg.fit_transform(X ) ) , color="""blue""" )
plt.title("""Truth or Bluff (Linear Regression)""" )
plt.xlabel("""Position level""" )
plt.ylabel("""Salary""" )
plt.show()
if __name__ == "__main__":
viz_polynomial()
# Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
| 218
|
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def snake_case_ (__A : Union[str, Any] ) -> Any:
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class SCREAMING_SNAKE_CASE ( a_ ):
"""simple docstring"""
@staticmethod
def SCREAMING_SNAKE_CASE ( lowerCAmelCase : ArgumentParser ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" , type=lowerCAmelCase , default=lowerCAmelCase , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=lowerCAmelCase , help="""Name of the model to download""" )
download_parser.set_defaults(func=lowerCAmelCase )
def __init__( self : Dict , lowerCAmelCase : str , lowerCAmelCase : str , lowerCAmelCase : bool , lowerCAmelCase : bool ) -> Any:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = model
__lowerCAmelCase : Optional[Any] = cache
__lowerCAmelCase : Optional[Any] = force
__lowerCAmelCase : Tuple = trust_remote_code
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
"""simple docstring"""
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 218
| 1
|
"""simple docstring"""
def move_tower(height : int , from_pole : str , to_pole : str , with_pole : str ) -> None:
if height >= 1:
move_tower(height - 1 , from_pole , with_pole , to_pole )
move_disk(from_pole , to_pole )
move_tower(height - 1 , with_pole , to_pole , from_pole )
def move_disk(from_pole : str , to_pole : str ) -> None:
print('moving disk from' , from_pole , 'to' , to_pole )
def main() -> None:
height = int(input('Height of hanoi: ' ).strip() )
move_tower(height , 'A' , 'B' , 'C' )
if __name__ == "__main__":
main()
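# Illustrative note (not part of the original script): the recursion makes
# exactly 2**height - 1 disk moves. For example, height 2 prints three moves:
# A -> C, A -> B, C -> B.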
| 174
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """timm_backbone"""
def __init__( self :Any , lowerCamelCase_ :int=None , lowerCamelCase_ :Optional[int]=3 , lowerCamelCase_ :int=True , lowerCamelCase_ :Dict=True , lowerCamelCase_ :Union[str, Any]=None , **lowerCamelCase_ :Optional[int] , ):
"""simple docstring"""
super().__init__(**lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =backbone
lowerCamelCase__ : List[Any] =num_channels
lowerCamelCase__ : Tuple =features_only
lowerCamelCase__ : Dict =use_pretrained_backbone
lowerCamelCase__ : Optional[int] =True
lowerCamelCase__ : Optional[int] =out_indices if out_indices is not None else (-1,)
| 174
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE__ = {
'vocab_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_file': {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE__ = {
'camembert-base': 5_1_2,
}
SCREAMING_SNAKE_CASE__ = '▁'
class a_ ( lowerCamelCase ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = ["""input_ids""", """attention_mask"""]
lowercase = CamembertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=["<s>NOTUSED", "</s>NOTUSED"] , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , additional_special_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
UpperCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCamelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 35
|
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--txt2img_unclip',
default='kakaobrain/karlo-v1-alpha',
type=str,
required=False,
help='The pretrained txt2img unclip.',
)
args = parser.parse_args()
txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)
feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')
img2img = UnCLIPImageVariationPipeline(
decoder=txt2img.decoder,
text_encoder=txt2img.text_encoder,
tokenizer=txt2img.tokenizer,
text_proj=txt2img.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txt2img.super_res_first,
super_res_last=txt2img.super_res_last,
decoder_scheduler=txt2img.decoder_scheduler,
super_res_scheduler=txt2img.super_res_scheduler,
)
img2img.save_pretrained(args.dump_path)
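# Note (not part of the original script): the image-variation pipeline reuses
# every component of the text-to-image checkpoint (decoder, super-resolution
# stages, schedulers, text encoder, tokenizer); the text-to-image `prior` is
# dropped, and a CLIP image encoder plus its feature extractor are added instead.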
| 35
| 1
|
'''simple docstring'''
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
A = threading.Lock()
A = None
A = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
A = logging.WARNING
A = True
def UpperCAmelCase ( ):
lowerCamelCase : str = os.getenv('TRANSFORMERS_VERBOSITY' , UpperCAmelCase__)
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F'''Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, '''
F'''has to be one of: { ', '.join(log_levels.keys()) }''')
return _default_log_level
def UpperCAmelCase ( ):
return __name__.split('.')[0]
def UpperCAmelCase ( ):
return logging.getLogger(_get_library_name())
def UpperCAmelCase ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
_default_handler = logging.StreamHandler() # Set sys.stderr as stream.
_default_handler.flush = sys.stderr.flush
# Apply our default configuration to the library root logger.
library_root_logger = _get_library_root_logger()
library_root_logger.addHandler(_default_handler)
library_root_logger.setLevel(_get_default_logging_level())
library_root_logger.propagate = False
def _reset_library_root_logger( ):
    global _default_handler
    with _lock:
        if not _default_handler:
            return
        library_root_logger = _get_library_root_logger()
        library_root_logger.removeHandler(_default_handler)
        library_root_logger.setLevel(logging.NOTSET)
        _default_handler = None
def get_log_levels_dict( ):
    # `log_levels` is the module-level name -> level mapping defined alongside these helpers.
    return log_levels
def get_logger( name : Optional[str] = None):
    if name is None:
        name = _get_library_name()
    _configure_library_root_logger()
    return logging.getLogger(name)
def get_verbosity( ):
    _configure_library_root_logger()
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity( verbosity : int):
    _configure_library_root_logger()
    _get_library_root_logger().setLevel(verbosity)
def set_verbosity_info( ):
    return set_verbosity(logging.INFO)
def set_verbosity_warning( ):
    return set_verbosity(logging.WARNING)
def set_verbosity_debug( ):
    return set_verbosity(logging.DEBUG)
def set_verbosity_error( ):
    return set_verbosity(logging.ERROR)
def disable_default_handler( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().removeHandler(_default_handler)
def enable_default_handler( ):
    _configure_library_root_logger()
    assert _default_handler is not None
    _get_library_root_logger().addHandler(_default_handler)
def add_handler( handler : logging.Handler):
    _configure_library_root_logger()
    assert handler is not None
    _get_library_root_logger().addHandler(handler)
def remove_handler( handler : logging.Handler):
    _configure_library_root_logger()
    # a handler can only be removed if it is actually attached
    assert handler is not None and handler in _get_library_root_logger().handlers
    _get_library_root_logger().removeHandler(handler)
def disable_propagation( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = False
def enable_propagation( ):
    _configure_library_root_logger()
    _get_library_root_logger().propagate = True
def enable_explicit_format( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        formatter = logging.Formatter('[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s')
        handler.setFormatter(formatter)
def reset_format( ):
    handlers = _get_library_root_logger().handlers
    for handler in handlers:
        handler.setFormatter(None)
def warning_advice( self , *args , **kwargs):
    no_advisory_warnings = os.getenv('TRANSFORMERS_NO_ADVISORY_WARNINGS' , False)
    if no_advisory_warnings:
        return
    self.warning(*args , **kwargs)
logging.Logger.warning_advice = warning_advice
@functools.lru_cache(None)
def warning_once( self , *args , **kwargs):
    self.warning(*args , **kwargs)
logging.Logger.warning_once = warning_once
class EmptyTqdm :
    def __init__( self, *args, **kwargs ): # pylint: disable=unused-argument
        """simple docstring"""
        self._iterator = args[0] if args else None
    def __iter__( self ):
        """simple docstring"""
        return iter(self._iterator )
    def __getattr__( self, name ):
        """simple docstring"""
        def empty_fn(*args, **kwargs ): # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        """simple docstring"""
        return self
    def __exit__( self, type_, value, traceback ):
        """simple docstring"""
        return
class _tqdm_cls :
    def __call__( self, *args, **kwargs ):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm(*args, **kwargs )
        else:
            return EmptyTqdm(*args, **kwargs )
    def set_lock( self, *args, **kwargs ):
        """simple docstring"""
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs )
    def get_lock( self ):
        """simple docstring"""
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled( ):
    global _tqdm_active
    return bool(_tqdm_active)
def enable_progress_bar( ):
    global _tqdm_active
    _tqdm_active = True
    hf_hub_utils.enable_progress_bars()
def disable_progress_bar( ):
    global _tqdm_active
    _tqdm_active = False
    hf_hub_utils.disable_progress_bars()
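# Minimal usage sketch of the helpers above. It assumes the surrounding module
# also defines `_configure_library_root_logger`, `_get_library_name`, `_lock`,
# `_tqdm_active` and `log_levels`, as transformers' logging module does.
set_verbosity_info()                       # library root logger now emits INFO
logger = get_logger("transformers.trainer")
logger.info("visible at INFO verbosity")
logger.warning_once("printed only once, thanks to the lru_cache above")
disable_progress_bar()                     # flips the tqdm shim defined above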
| 320
|
'''simple docstring'''
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
A = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset( Dataset):
    def __init__( self, length = 101 ):
        """simple docstring"""
        self.length = length
def __len__( self ):
"""simple docstring"""
return self.length
    def __getitem__( self, i ):
        """simple docstring"""
        return i
class DummyDataCollator :
def __call__( self, A ):
"""simple docstring"""
return {"input_ids": torch.tensor(A ), "labels": torch.tensor(A )}
class DummyModel( nn.Module):
    def __init__( self ):
        """simple docstring"""
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80 )
    def forward( self, input_ids, labels=None ):
        """simple docstring"""
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device ), input_ids
else:
return input_ids
class TestTrainerDistributedNeuronCore( TestCasePlus):
    @require_torch_neuroncore
    def test_trainer( self ):
        """simple docstring"""
        distributed_args = F'''--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'''--output_dir {output_dir}'''.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed( TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer( self ):
        """simple docstring"""
        distributed_args = F'''--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        '''.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = F'''--output_dir {output_dir}'''.split()
        cmd = ['torchrun'] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, """
f"""distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"""
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)
    def compute_metrics( p : EvalPrediction):
        sequential = list(range(len(dataset)))
        success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
'Predictions and/or labels do not match expected results:\n - predictions: '
F'''{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}''')
return {"success": success}
    trainer = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = 2
    metrics = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
    p = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
    trainer.args.eval_accumulation_steps = None
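# Self-contained sketch of the property the test above verifies: distributed
# evaluation must hand back exactly one prediction per sample, in dataset
# order. Names below are illustrative, not taken from the test.
import numpy as np
def predictions_are_sequential(predictions, dataset_length):
    return predictions.tolist() == list(range(dataset_length))
assert predictions_are_sequential(np.arange(101), 101)
assert not predictions_are_sequential(np.arange(100), 101)  # truncated output fails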
| 320
| 1
|
"""simple docstring"""
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('''socket.socket''')
@patch('''builtins.open''')
def __A ( file :Mock , sock :Mock) -> None:
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    file_iterator = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(file_iterator)
    # ===== invoke =====
    send_file(filename='''mytext.txt''' , testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
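# Hypothetical sketch of the send_file function the test above exercises,
# inferred only from the mocked calls (bind, listen, accept, recv, send,
# shutdown, close); the real module may differ in details.
import socket
def send_file_sketch(filename: str = "mytext.txt", testing: bool = False) -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", 12312))
    sock.listen(5)
    conn, _addr = sock.accept()
    conn.recv(1024)  # wait for the client's request before streaming
    with open(filename, "rb") as in_file:
        data = in_file.read(1024)
        while data:  # the mock's iter([1, None]) drives exactly one send()
            conn.send(data)
            data = in_file.read(1024)
    conn.close()
    sock.shutdown(1)
    sock.close()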
| 719
|
"""simple docstring"""
def molarity_to_normality( nfactor :int , moles :float , volume :float) -> float:
    return round(float(moles / volume) * nfactor)
def moles_to_pressure( volume :float , moles :float , temperature :float) -> float:
    return round(float((moles * 0.0_8_2_1 * temperature) / (volume)))
def moles_to_volume( pressure :float , moles :float , temperature :float) -> float:
    return round(float((moles * 0.0_8_2_1 * temperature) / (pressure)))
def pressure_and_volume_to_temperature( pressure :float , moles :float , volume :float) -> float:
    return round(float((pressure * volume) / (0.0_8_2_1 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod()
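# Quick usage check of the converters above (values follow the ideal gas law
# with R ~ 0.0821 L*atm/(mol*K); expected outputs verified by hand):
print(molarity_to_normality(2, 3.1, 0.31))              # 20
print(moles_to_pressure(0.82, 3, 300))                  # 90
print(moles_to_volume(0.82, 3, 300))                    # 90
print(pressure_and_volume_to_temperature(0.82, 1, 2))   # 20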
| 101
| 0
|
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class _snake_case ( unittest.TestCase ):
    def setUp( self):
        '''simple docstring'''
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_script.py"""])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_distributed_data_loop.py"""])
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["""scripts""", """test_ops.py"""])
@require_multi_gpu
    def test_multi_gpu( self):
        '''simple docstring'''
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
@require_multi_gpu
    def test_multi_gpu_ops( self):
        '''simple docstring'''
        print(f'Found {torch.cuda.device_count()} devices.')
        cmd = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.operation_file_path]
        print(f'Command: {cmd}')
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
@require_multi_gpu
    def test_pad_across_processes( self):
        '''simple docstring'''
        cmd = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd , env=os.environ.copy())
@require_multi_gpu
    def test_distributed_data_loop( self):
        '''simple docstring'''
        print(f'Found {torch.cuda.device_count()} devices, using 2 devices only')
        cmd = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1"""):
            execute_subprocess_async(cmd , env=os.environ.copy())
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = """"""
    tensora = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
    tensora = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
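# Conceptual, single-process sketch of what pad_across_processes does
# (illustrative only, not the accelerate implementation): each rank pads its
# tensor with zeros up to the maximum dim-0 size across ranks.
def pad_to_max_rows(tensor, max_rows, pad_first=False):
    padding = torch.zeros((max_rows - tensor.shape[0], *tensor.shape[1:]), dtype=tensor.dtype)
    return torch.cat([padding, tensor] if pad_first else [tensor, padding], dim=0)
_t = torch.ones(3, 10, dtype=torch.int64)
assert pad_to_max_rows(_t, 5).shape[0] == 5
assert torch.all(pad_to_max_rows(_t, 5, pad_first=True)[:2] == 0)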
| 12
|
"""simple docstring"""
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar('T')
ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
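# Illustrative (hypothetical) consumer of the aliases above: a helper that
# accepts a single path or a nested structure of paths.
def count_paths(paths: NestedDataStructureLike[PathLike]) -> int:
    if isinstance(paths, (list, tuple)):
        return sum(count_paths(p) for p in paths)
    if isinstance(paths, dict):
        return sum(count_paths(p) for p in paths.values())
    return 1
assert count_paths({"train": ["a.csv", "b.csv"], "test": "c.csv"}) == 3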
| 213
| 0
|
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_fold_dataloaders(accelerator: Accelerator , dataset: DatasetDict , train_idxs: List[int] , valid_idxs: List[int] , batch_size: int = 16 ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = DatasetDict(
        {
            '''train''': dataset['''train'''].select(train_idxs ),
            '''validation''': dataset['''train'''].select(valid_idxs ),
            '''test''': dataset['''validation'''],
        } )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    test_dataloader = DataLoader(
        tokenized_datasets['''test'''] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config , args ):
    """simple docstring"""
    test_predictions = []
    # Download the dataset
    datasets = load_dataset('''glue''' , '''mrpc''' )
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds ) )
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )
    metric = evaluate.load('''glue''' , '''mrpc''' )
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed )
    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets['''train'''].num_rows ) , datasets['''train''']['''label'''] )
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds ):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator , datasets , train_idxs , valid_idxs , )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )
        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device )
        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters() , lr=lr )
        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )
        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )
# Now we train the model
        for epoch in range(num_epochs ):
            model.train()
            for step, batch in enumerate(train_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                outputs = model(**batch )
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss )
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()
            model.eval()
            for step, batch in enumerate(eval_dataloader ):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device )
                with torch.no_grad():
                    outputs = model(**batch )
                predictions = outputs.logits.argmax(dim=-1 )
                predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
                metric.add_batch(
                    predictions=predictions , references=references , )
            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f'''epoch {epoch}:''' , eval_metric )
        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            fold_predictions.append(predictions.cpu() )
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu() )
        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions , dim=0 ) )
        # We now need to release all our memory and get rid of the current model, optimizer, etc
        accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references , dim=0 )
    preds = torch.stack(test_predictions , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
    test_metric = metric.compute(predictions=preds , references=test_references )
    accelerator.print('''Average test metrics from all folds:''' , test_metric )
def main( )-> None:
    """simple docstring"""
    parser = argparse.ArgumentParser(description='''Simple example of training script.''' )
    parser.add_argument(
        '''--mixed_precision''' , type=str , default=None , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
        '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
        '''and an Nvidia Ampere GPU.''' , )
    parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
    # New Code #
    parser.add_argument('''--num_folds''' , type=int , default=3 , help='''The number of splits to perform across the dataset''' )
    args = parser.parse_args()
    config = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
    training_function(config , args )
if __name__ == "__main__":
main()
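# Minimal sketch of the fold-averaging ("soft voting") step used above:
# per-fold logits are stacked, summed, divided by the fold count, then
# argmax'd (shapes below are illustrative: 3 folds, 4 samples, 2 classes).
fold_logits = [torch.randn(4, 2) for _ in range(3)]
averaged = torch.stack(fold_logits, dim=0).sum(dim=0).div(3)
print(averaged.argmax(dim=-1).shape)  # torch.Size([4])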
| 720
|
def solution(numerator = 3 , denominator = 7 , limit = 100_0000 )-> int:
    """simple docstring"""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1 , limit + 1 ):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=100_0000))
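# Brute-force cross-check for a small search space (Project Euler 71 asks for
# the fraction immediately to the left of 3/7): with denominators up to 8 the
# best left neighbour of 3/7 is 2/5, so solution(3, 7, 8) returns numerator 2.
from fractions import Fraction
best = max(
    Fraction(n, d) for d in range(1, 9) for n in range(d) if Fraction(n, d) < Fraction(3, 7)
)
assert best == Fraction(2, 5) and solution(3, 7, 8) == 2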
| 531
| 0
|
'''simple docstring'''
def or_gate( input_1 , input_2 ):
    return int((input_1, input_2).count(1 ) != 0 )
def lowerCamelCase_ ( ):
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 316
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        """simple docstring"""
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
        image_processor_map = {
            '''do_resize''': True,
            '''size''': 20,
            '''do_center_crop''': True,
            '''crop_size''': 18,
            '''do_normalize''': True,
            '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
            '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        """simple docstring"""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **kwargs )
    def get_image_processor( self , **kwargs ):
        """simple docstring"""
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        """simple docstring"""
        image_inputs = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = OwlViTProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_snake_case )
__lowerCamelCase = OwlViTProcessor(tokenizer=_snake_case , image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _snake_case )
self.assertIsInstance(processor_fast.tokenizer , _snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _snake_case )
self.assertIsInstance(processor_fast.image_processor , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = self.get_image_processor(do_normalize=_snake_case )
__lowerCamelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_snake_case )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _snake_case )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(_snake_case , return_tensors='''np''' )
__lowerCamelCase = processor(images=_snake_case , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = processor(text=_snake_case , return_tensors='''np''' )
__lowerCamelCase = tokenizer(_snake_case , return_tensors='''np''' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowerCamelCase = '''lower newer'''
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=_snake_case , images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''google/owlvit-base-patch32'''
__lowerCamelCase = OwlViTProcessor.from_pretrained(_snake_case )
__lowerCamelCase = ['''cat''', '''nasa badge''']
__lowerCamelCase = processor(text=_snake_case )
__lowerCamelCase = 16
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''google/owlvit-base-patch32'''
__lowerCamelCase = OwlViTProcessor.from_pretrained(_snake_case )
__lowerCamelCase = [['''cat''', '''nasa badge'''], ['''person''']]
__lowerCamelCase = processor(text=_snake_case )
__lowerCamelCase = 16
__lowerCamelCase = len(_snake_case )
__lowerCamelCase = max([len(_snake_case ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''google/owlvit-base-patch32'''
__lowerCamelCase = OwlViTProcessor.from_pretrained(_snake_case )
__lowerCamelCase = ['''cat''', '''nasa badge''']
__lowerCamelCase = processor(text=_snake_case )
__lowerCamelCase = 16
__lowerCamelCase = inputs['''input_ids''']
__lowerCamelCase = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] )
self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(images=_snake_case , query_images=_snake_case )
self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=_snake_case , image_processor=_snake_case )
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase = processor.batch_decode(_snake_case )
__lowerCamelCase = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case , _snake_case )
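# Hedged end-to-end sketch of the processor API the tests above cover; the
# checkpoint id comes from those tests, and running this downloads weights.
processor_demo = OwlViTProcessor.from_pretrained('''google/owlvit-base-patch32''')
image_demo = Image.fromarray(np.zeros((480, 640, 3), dtype=np.uint8))
inputs_demo = processor_demo(text=[['''cat''', '''nasa badge''']], images=image_demo, return_tensors='''np''')
print(sorted(inputs_demo.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']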
| 316
| 1
|
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling( data: dict ):
    return (data["data"], data["target"])
def xgboost( features: np.ndarray, target: np.ndarray, test_features: np.ndarray ):
    xgb = XGBRegressor(verbosity=0, random_state=42 )
    xgb.fit(features, target )
    # Predict target for test data
    predictions = xgb.predict(test_features )
    predictions = predictions.reshape(len(predictions ), 1 )
    return predictions
def main( ):
    data = fetch_california_housing()
    data, target = data_handling(data )
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1 )
    predictions = xgboost(x_train, y_train, x_test )
    # Error printing
    print(f"""Mean Absolute Error : {mean_absolute_error(y_test, predictions )}""" )
    print(f"""Mean Square Error : {mean_squared_error(y_test, predictions )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
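# Small illustrative follow-up: RMSE is often reported alongside MAE/MSE and
# is just the square root of sklearn's mean_squared_error.
y_true = np.array([3.0, 2.5, 4.0])
y_pred = np.array([2.8, 2.9, 3.6])
print(f"""Root Mean Square Error : {np.sqrt(mean_squared_error(y_true, y_pred))}""")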
| 257
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
'configuration_timesformer': ['TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimesformerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimesformerModel',
'TimesformerForVideoClassification',
'TimesformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 257
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
lowercase__ : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( snake_case__=None , snake_case__=None ) -> Tuple:
return field(default_factory=lambda: default , metadata=snake_case__ )
@dataclass
class lowercase_ :
"""simple docstring"""
UpperCAmelCase_ : List[str] = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
UpperCAmelCase_ : List[int] = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""} )
UpperCAmelCase_ : List[int] = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""} )
UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Use FP16 to accelerate inference."""} )
UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Benchmark training of model"""} )
UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Verbose memory tracing"""} )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Trace memory line by line"""} )
UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Save result to a CSV file"""} )
UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Save all print statements in a log file"""} )
UpperCAmelCase_ : bool = field(default=UpperCamelCase_ , metadata={"""help""": """Whether to print environment information"""} )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
UpperCAmelCase_ : str = field(
default=F"inference_time_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
UpperCAmelCase_ : str = field(
default=F"inference_memory_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
UpperCAmelCase_ : str = field(
default=F"train_time_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
UpperCAmelCase_ : str = field(
default=F"train_memory_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
UpperCAmelCase_ : str = field(
default=F"env_info_{round(time() )}.csv" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
UpperCAmelCase_ : str = field(
default=F"log_{round(time() )}.csv" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
UpperCAmelCase_ : int = field(default=3 , metadata={"""help""": """Times an experiment will be run."""} )
UpperCAmelCase_ : bool = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
warnings.warn(
F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , __SCREAMING_SNAKE_CASE , )
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 312
|
import os
lowercase__ : List[str] = {'''I''': 1, '''V''': 5, '''X''': 1_0, '''L''': 5_0, '''C''': 1_0_0, '''D''': 5_0_0, '''M''': 1_0_0_0}
def parse_roman_numerals( numerals ) -> int:
    total_value = 0
    index = 0
    while index < len(numerals ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def generate_roman_numerals( num ) -> str:
    numerals = ''''''
    m_count = num // 1_0_0_0
    numerals += m_count * "M"
    num %= 1_0_0_0
    c_count = num // 1_0_0
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 1_0_0
    x_count = num // 1_0
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 1_0
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
def solution( roman_numerals_filename = "/p089_roman.txt" ) -> int:
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shortened = generate_roman_numerals(num )
        savings += len(original ) - len(shortened )
    return savings
if __name__ == "__main__":
print(f'{solution() = }')
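# Round-trip sanity checks for the two converters above:
assert parse_roman_numerals("XLIX") == 49
assert generate_roman_numerals(49) == "XLIX"
assert generate_roman_numerals(parse_roman_numerals("IIIIIIIIII")) == "X"  # 10 chars -> 1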
| 312
| 1
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_a: List[str] = logging.get_logger(__name__)
def rename_key( key ):
    regex = r"\w+[.]\d+"
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , "_".join(pat.split("." ) ) )
    return key
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
    if (
        any("norm" in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("scale",)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ("embedding",)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("kernel",)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("weight",)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ("bias",)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split("." ) )
        # Correctly rename weight parameters
        flax_key, flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F"PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape "
                    F"{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}." )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
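# Quick check of rename_key: module-list indices like "layers.0" are folded
# into the parameter name so PyTorch keys line up with Flax naming.
print(rename_key("encoder.layers.0.attention.weight"))  # encoder.layers_0.attention.weight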
| 268
|
from __future__ import annotations
class BoyerMooreSearch :
    def __init__( self , text : str , pattern : str ):
        '''simple docstring'''
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text ), len(pattern )
    def match_in_pattern( self , char : str ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
    def mismatch_in_text( self , current_pos : int ):
'''simple docstring'''
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
    def bad_character_heuristic( self ):
        '''simple docstring'''
        positions = []
        for i in range(self.textLen - self.patLen + 1 ):
            mismatch_index = self.mismatch_in_text(i )
            if mismatch_index == -1:
                positions.append(i )
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index] )
                i = (
                    mismatch_index - match_index
                ) # shifting index lgtm [py/multiple-definition]
return positions
text = """ABAABA"""
pattern = """AB"""
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print("""No match found""")
else:
print("""Pattern found in following positions: """)
print(positions)
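# One more usage sketch with a longer haystack (expected output verified by
# hand: "TEST" starts at index 10 of the text):
bms_long = BoyerMooreSearch("THIS IS A TEST TEXT", "TEST")
print(bms_long.bad_character_heuristic())  # [10]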
| 268
| 1
|
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint( checkpoint, config )-> Tuple:
    """simple docstring"""
    vae_state_dict = checkpoint
    new_checkpoint = {}
    new_checkpoint['''encoder.conv_in.weight'''] = vae_state_dict['''encoder.conv_in.weight''']
    new_checkpoint['''encoder.conv_in.bias'''] = vae_state_dict['''encoder.conv_in.bias''']
    new_checkpoint['''encoder.conv_out.weight'''] = vae_state_dict['''encoder.conv_out.weight''']
    new_checkpoint['''encoder.conv_out.bias'''] = vae_state_dict['''encoder.conv_out.bias''']
    new_checkpoint['''encoder.conv_norm_out.weight'''] = vae_state_dict['''encoder.norm_out.weight''']
    new_checkpoint['''encoder.conv_norm_out.bias'''] = vae_state_dict['''encoder.norm_out.bias''']
    new_checkpoint['''decoder.conv_in.weight'''] = vae_state_dict['''decoder.conv_in.weight''']
    new_checkpoint['''decoder.conv_in.bias'''] = vae_state_dict['''decoder.conv_in.bias''']
    new_checkpoint['''decoder.conv_out.weight'''] = vae_state_dict['''decoder.conv_out.weight''']
    new_checkpoint['''decoder.conv_out.bias'''] = vae_state_dict['''decoder.conv_out.bias''']
    new_checkpoint['''decoder.conv_norm_out.weight'''] = vae_state_dict['''decoder.norm_out.weight''']
    new_checkpoint['''decoder.conv_norm_out.bias'''] = vae_state_dict['''decoder.norm_out.bias''']
    new_checkpoint['''quant_conv.weight'''] = vae_state_dict['''quant_conv.weight''']
    new_checkpoint['''quant_conv.bias'''] = vae_state_dict['''quant_conv.bias''']
    new_checkpoint['''post_quant_conv.weight'''] = vae_state_dict['''post_quant_conv.weight''']
    new_checkpoint['''post_quant_conv.bias'''] = vae_state_dict['''post_quant_conv.bias''']
# Retrieves the keys for the encoder down blocks only
lowercase = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''encoder.down''' in layer} )
lowercase = {
layer_id: [key for key in vae_state_dict if f'down.{layer_id}' in key] for layer_id in range(lowercase__ )
}
# Retrieves the keys for the decoder up blocks only
lowercase = len({'''.'''.join(layer.split('''.''' )[:3] ) for layer in vae_state_dict if '''decoder.up''' in layer} )
lowercase = {
layer_id: [key for key in vae_state_dict if f'up.{layer_id}' in key] for layer_id in range(lowercase__ )
}
for i in range(lowercase__ ):
lowercase = [key for key in down_blocks[i] if f'down.{i}' in key and f'down.{i}.downsample' not in key]
if f'encoder.down.{i}.downsample.conv.weight' in vae_state_dict:
lowercase = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.weight' )
lowercase = vae_state_dict.pop(
f'encoder.down.{i}.downsample.conv.bias' )
lowercase = renew_vae_resnet_paths(lowercase__ )
lowercase = {'''old''': f'down.{i}.block', '''new''': f'down_blocks.{i}.resnets'}
assign_to_checkpoint(lowercase__, lowercase__, lowercase__, additional_replacements=[meta_path], config=lowercase__ )
lowercase = [key for key in vae_state_dict if '''encoder.mid.block''' in key]
lowercase = 2
for i in range(1, num_mid_res_blocks + 1 ):
lowercase = [key for key in mid_resnets if f'encoder.mid.block_{i}' in key]
lowercase = renew_vae_resnet_paths(lowercase__ )
lowercase = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowercase__, lowercase__, lowercase__, additional_replacements=[meta_path], config=lowercase__ )
lowercase = [key for key in vae_state_dict if '''encoder.mid.attn''' in key]
lowercase = renew_vae_attention_paths(lowercase__ )
lowercase = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(lowercase__, lowercase__, lowercase__, additional_replacements=[meta_path], config=lowercase__ )
conv_attn_to_linear(lowercase__ )
for i in range(lowercase__ ):
lowercase = num_up_blocks - 1 - i
lowercase = [
key for key in up_blocks[block_id] if f'up.{block_id}' in key and f'up.{block_id}.upsample' not in key
]
if f'decoder.up.{block_id}.upsample.conv.weight' in vae_state_dict:
lowercase = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.weight'
]
lowercase = vae_state_dict[
f'decoder.up.{block_id}.upsample.conv.bias'
]
lowercase = renew_vae_resnet_paths(lowercase__ )
lowercase = {'''old''': f'up.{block_id}.block', '''new''': f'up_blocks.{i}.resnets'}
assign_to_checkpoint(lowercase__, lowercase__, lowercase__, additional_replacements=[meta_path], config=lowercase__ )
lowercase = [key for key in vae_state_dict if '''decoder.mid.block''' in key]
lowercase = 2
for i in range(1, num_mid_res_blocks + 1 ):
lowercase = [key for key in mid_resnets if f'decoder.mid.block_{i}' in key]
lowercase = renew_vae_resnet_paths(lowercase__ )
lowercase = {'''old''': f'mid.block_{i}', '''new''': f'mid_block.resnets.{i - 1}'}
assign_to_checkpoint(lowercase__, lowercase__, lowercase__, additional_replacements=[meta_path], config=lowercase__ )
lowercase = [key for key in vae_state_dict if '''decoder.mid.attn''' in key]
lowercase = renew_vae_attention_paths(lowercase__ )
lowercase = {'''old''': '''mid.attn_1''', '''new''': '''mid_block.attentions.0'''}
assign_to_checkpoint(lowercase__, lowercase__, lowercase__, additional_replacements=[meta_path], config=lowercase__ )
conv_attn_to_linear(lowercase__ )
return new_checkpoint
def vae_pt_to_vae_diffuser( checkpoint_path, output_path, )-> None:
    """simple docstring"""
    r = requests.get(
        ''' https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml''' )
    io_obj = io.BytesIO(r.content )
    original_config = OmegaConf.load(io_obj )
    image_size = 512
    device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
    if checkpoint_path.endswith('''safetensors''' ):
        from safetensors import safe_open
        checkpoint = {}
        with safe_open(checkpoint_path, framework='''pt''', device='''cpu''' ) as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key )
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device )['''state_dict''']
    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size )
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config )
    vae = AutoencoderKL(**vae_config )
    vae.load_state_dict(converted_vae_checkpoint )
    vae.save_pretrained(output_path )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
A_ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
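# Hedged programmatic invocation of the converter above (both paths are
# placeholders; the function also fetches the v1-inference.yaml config from
# GitHub at call time, so it needs network access):
#     vae_pt_to_vae_diffuser("./vae.ckpt", "./converted_vae")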
| 604
|
def hamming( n_element ):
    '''simple docstring'''
    n_element = int(n_element )
    if n_element < 1:
        my_error = ValueError("a should be a positive number" )
        raise my_error
    hamming_list = [1]
    i, j, k = (0, 0, 0)
    index = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
    n = input("""Enter the last number (nth term) of the Hamming Number Series: """)
print("""Formula of Hamming Number Series => 2^i * 3^j * 5^k""")
    hamming_numbers = hamming(int(n))
print("""-----------------------------------------------------""")
print(f"""The list with nth numbers is: {hamming_numbers}""")
print("""-----------------------------------------------------""")
| 54
| 0
|
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
test_maximum_claim_table = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class BankersAlgorithm :
    def __init__( self , claim_vector , allocated_resources_table , maximum_claim_table , ):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table
    def __processes_resource_summation( self ):
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table )
            for i in range(len(self.__allocated_resources_table[0] ) )
        ]
    def __available_resources( self ):
        return np.array(self.__claim_vector ) - np.array(
            self.__processes_resource_summation() )
    def __need( self ):
        return [
            list(np.array(self.__maximum_claim_table[i] ) - np.array(allocated_resource ) )
            for i, allocated_resource in enumerate(self.__allocated_resources_table )
        ]
    def __need_index_manager( self ):
        return {self.__need().index(i ): i for i in self.__need()}
    def main( self , **kwargs ):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need ):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
                    need_list.remove(each_need )
                    # update available/freed resources stack
                    available_resources = np.array(available_resources ) + np.array(
                        alloc_resources_table[process_number] )
                    print(
                        'Updated available resource stack for processes: '
                        + ' '.join([str(x ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
    def __pretty_data( self ):
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
                f'P{self.__allocated_resources_table.index(item ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
                f'P{self.__maximum_claim_table.index(item ) + 1}'
+ ' '.join(f'{it:>8}' for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
            + ' '.join(str(x ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
            + ' '.join(str(x ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
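# Usage sketch with the module-level test data above; passing describe=True
# triggers the pretty-printer before the safety check runs.
BankersAlgorithm(
    test_claim_vector, test_allocated_res_table, test_maximum_claim_table
).main(describe=True)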
| 617
|
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=[] )-> Any:
__UpperCAmelCase = size[0] - overlap_pixels * 2
__UpperCAmelCase = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
__UpperCAmelCase = np.ones((size_y, size_x) , dtype=np.uinta ) * 2_55
__UpperCAmelCase = np.pad(_lowerCAmelCase , mode='linear_ramp' , pad_width=_lowerCAmelCase , end_values=0 )
if "l" in remove_borders:
__UpperCAmelCase = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
__UpperCAmelCase = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
__UpperCAmelCase = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
__UpperCAmelCase = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )-> Any:
return max(_lowerCAmelCase , min(_lowerCAmelCase , _lowerCAmelCase ) )
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )-> Dict:
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )-> Dict:
__UpperCAmelCase = list(_lowerCAmelCase )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
__UpperCAmelCase = clamp_rect(_lowerCAmelCase , [0, 0] , [image_size[0], image_size[1]] )
return rect
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )-> Tuple:
__UpperCAmelCase = Image.new('RGB' , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(_lowerCAmelCase , (original_slice, 0) )
return result
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> List[Any]:
__UpperCAmelCase = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
__UpperCAmelCase = tile.crop(_lowerCAmelCase )
return tile
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> Optional[int]:
__UpperCAmelCase = n % d
return n - divisor
class UpperCAmelCase ( UpperCAmelCase_ ):
def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ):
super().__init__(
vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , )
def __lowerCamelCase ( self , __A , __A , __A , __A , __A , __A , __A , **__A ):
torch.manual_seed(0 )
__UpperCAmelCase = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
__UpperCAmelCase = add_overlap_rect(__A , __A , image.size )
__UpperCAmelCase = image.crop(__A )
__UpperCAmelCase = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
__UpperCAmelCase = translated_slice_x - (original_image_slice / 2)
__UpperCAmelCase = max(0 , __A )
__UpperCAmelCase = squeeze_tile(__A , __A , __A , __A )
__UpperCAmelCase = to_input.size
__UpperCAmelCase = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
__UpperCAmelCase = super(__A , self ).__call__(image=__A , **__A ).images[0]
__UpperCAmelCase = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
__UpperCAmelCase = unsqueeze_tile(__A , __A )
__UpperCAmelCase = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
__UpperCAmelCase = []
if x == 0:
remove_borders.append('l' )
elif crop_rect[2] == image.size[0]:
remove_borders.append('r' )
if y == 0:
remove_borders.append('t' )
elif crop_rect[3] == image.size[1]:
remove_borders.append('b' )
__UpperCAmelCase = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode='L' , )
final_image.paste(
__A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A )
@torch.no_grad()
def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ):
__UpperCAmelCase = Image.new('RGB' , (image.size[0] * 4, image.size[1] * 4) )
__UpperCAmelCase = math.ceil(image.size[0] / tile_size )
__UpperCAmelCase = math.ceil(image.size[1] / tile_size )
__UpperCAmelCase = tcx * tcy
__UpperCAmelCase = 0
for y in range(__A ):
for x in range(__A ):
self._process_tile(
__A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , )
current_count += 1
if callback is not None:
callback({'progress': current_count / total_tile_count, 'image': final_image} )
return final_image
def _lowerCAmelCase ( )-> str:
# Run a demo
__UpperCAmelCase = 'stabilityai/stable-diffusion-x4-upscaler'
    __UpperCAmelCase = StableDiffusionTiledUpscalePipeline.from_pretrained(_lowerCAmelCase , revision='fp16' , torch_dtype=torch.float16 )
__UpperCAmelCase = pipe.to('cuda' )
__UpperCAmelCase = Image.open('../../docs/source/imgs/diffusers_library.jpg' )
def callback(_lowerCAmelCase ):
print(F'progress: {obj["progress"]:.4f}' )
obj["image"].save('diffusers_library_progress.jpg' )
__UpperCAmelCase = pipe(image=_lowerCAmelCase , prompt='Black font, white background, vector' , noise_level=40 , callback=_lowerCAmelCase )
final_image.save('diffusers_library.jpg' )
if __name__ == "__main__":
main()
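# --- Illustrative sketch (not part of the sample above) ---
# The pipeline above processes the image tile by tile and feathers the tile
# seams with a linear-ramp alpha mask. A minimal, diffusion-free demo of that
# blending idea (a plain BICUBIC resize stands in for the x4 upscaler; tile
# sizes, overlaps, and names below are illustrative assumptions):
import numpy as np
from PIL import Image

def feather_mask(size, overlap):
    # 255 in the interior, ramping to 0 over `overlap` pixels at every edge.
    core = np.ones((size[1] - 2 * overlap, size[0] - 2 * overlap), dtype=np.uint8) * 255
    return Image.fromarray(np.pad(core, overlap, mode="linear_ramp", end_values=0), mode="L")

src = Image.effect_noise((64, 64), 64).convert("RGB")
canvas = Image.new("RGB", (src.size[0] * 4, src.size[1] * 4))
for ty in range(2):
    for tx in range(2):
        tile = src.crop((tx * 32 - 4, ty * 32 - 4, tx * 32 + 36, ty * 32 + 36))  # 4 px overlap
        up = tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)    # stand-in for SD x4
        canvas.paste(up, (tx * 128 - 16, ty * 128 - 16), feather_mask(up.size, 16))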
| 617
| 1
|
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_lowerCAmelCase = """
transformers can only be used from the command line to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class __UpperCamelCase ( a__ ):
@staticmethod
def __lowerCamelCase ( _A ):
'''simple docstring'''
_lowerCAmelCase : List[str] = parser.add_parser(
            'convert' ,help='CLI tool to convert a model from original author checkpoints to Transformers PyTorch checkpoints.' ,)
train_parser.add_argument('--model_type' ,type=_A ,required=_A ,help='Model\'s type.' )
train_parser.add_argument(
'--tf_checkpoint' ,type=_A ,required=_A ,help='TensorFlow checkpoint path or folder.' )
train_parser.add_argument(
'--pytorch_dump_output' ,type=_A ,required=_A ,help='Path to the PyTorch saved model output.' )
train_parser.add_argument('--config' ,type=_A ,default='' ,help='Configuration file path or folder.' )
train_parser.add_argument(
'--finetuning_task_name' ,type=_A ,default=_A ,help='Optional fine-tuning task name if the TF model was a finetuned model.' ,)
train_parser.set_defaults(func=_A )
def __init__( self ,_A ,_A ,_A ,_A ,_A ,*_A ,):
'''simple docstring'''
_lowerCAmelCase : str = logging.get_logger('transformers-cli/converting' )
self._logger.info(F"""Loading model {model_type}""" )
_lowerCAmelCase : int = model_type
_lowerCAmelCase : Optional[Any] = tf_checkpoint
_lowerCAmelCase : Union[str, Any] = pytorch_dump_output
_lowerCAmelCase : Optional[int] = config
_lowerCAmelCase : Optional[Any] = finetuning_task_name
def __lowerCamelCase ( self ):
'''simple docstring'''
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "t5":
try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(_A )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
if "ckpt" in self._tf_checkpoint.lower():
_lowerCAmelCase : List[Any] = self._tf_checkpoint
_lowerCAmelCase : Union[str, Any] = ''
else:
_lowerCAmelCase : Optional[int] = self._tf_checkpoint
_lowerCAmelCase : Dict = ''
convert_transfo_xl_checkpoint_to_pytorch(
_A ,self._config ,self._pytorch_dump_output ,_A )
elif self._model_type == "gpt2":
try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(_A )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint ,self._config ,self._pytorch_dump_output ,self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint ,self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint ,self._config ,self._pytorch_dump_output )
else:
raise ValueError(
                '--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert, rembert]' )
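# --- Illustrative sketch (not part of the sample above) ---
# How the convert subcommand registered above is typically invoked from a
# shell (paths and file names below are placeholders, not real checkpoints):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin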
| 259
|
"""simple docstring"""
import random
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = num - 1
_lowerCAmelCase : List[Any] = 0
while s % 2 == 0:
_lowerCAmelCase : Tuple = s // 2
t += 1
for _ in range(5 ):
_lowerCAmelCase : Dict = random.randrange(2 , num - 1 )
_lowerCAmelCase : str = pow(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if v != 1:
_lowerCAmelCase : Union[str, Any] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
_lowerCAmelCase : str = i + 1
_lowerCAmelCase : List[str] = (v**2) % num
return True
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
if num < 2:
return False
_lowerCAmelCase : Any = [
2,
3,
5,
7,
11,
13,
17,
19,
23,
29,
31,
37,
41,
43,
47,
53,
59,
61,
67,
71,
73,
79,
83,
89,
97,
101,
103,
107,
109,
113,
127,
131,
137,
139,
149,
151,
157,
163,
167,
173,
179,
181,
191,
193,
197,
199,
211,
223,
227,
229,
233,
239,
241,
251,
257,
263,
269,
271,
277,
281,
283,
293,
307,
311,
313,
317,
331,
337,
347,
349,
353,
359,
367,
373,
379,
383,
389,
397,
401,
409,
419,
421,
431,
433,
439,
443,
449,
457,
461,
463,
467,
479,
487,
491,
499,
503,
509,
521,
523,
541,
547,
557,
563,
569,
571,
577,
587,
593,
599,
601,
607,
613,
617,
619,
631,
641,
643,
647,
653,
659,
661,
673,
677,
683,
691,
701,
709,
719,
727,
733,
739,
743,
751,
757,
761,
769,
773,
787,
797,
809,
811,
821,
823,
827,
829,
839,
853,
857,
859,
863,
877,
881,
883,
887,
907,
911,
919,
929,
937,
941,
947,
953,
967,
971,
977,
983,
991,
997,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(_lowerCamelCase )
def lowerCamelCase__ ( _lowerCamelCase = 1024 ):
'''simple docstring'''
while True:
_lowerCAmelCase : List[str] = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(_lowerCamelCase ):
return num
if __name__ == "__main__":
_lowerCAmelCase = generate_large_prime()
print(("""Prime number:""", num))
print(("""is_prime_low_num:""", is_prime_low_num(num)))
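# --- Illustrative sketch (not part of the sample above) ---
# A de-obfuscated Miller-Rabin matching the sample's structure, checked
# against naive trial division. Witnesses 2, 3, 5, 7 make the test
# deterministic for num < 3_215_031_751 (the sample uses 5 random bases).
def miller_rabin(num):
    if num < 2:
        return False
    if num % 2 == 0:
        return num == 2
    s, t = num - 1, 0
    while s % 2 == 0:
        s, t = s // 2, t + 1
    for a in (2, 3, 5, 7):
        if a % num == 0:
            continue  # witness coincides with num itself (num in {2, 3, 5, 7})
        v = pow(a, s, num)
        if v in (1, num - 1):
            continue
        for _ in range(t - 1):
            v = (v * v) % num
            if v == num - 1:
                break
        else:
            return False
    return True

naive = lambda n: n > 1 and all(n % d for d in range(2, int(n**0.5) + 1))
assert all(miller_rabin(n) == naive(n) for n in range(2, 2000))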
| 259
| 1
|
'''simple docstring'''
from __future__ import annotations
from PIL import Image
# Define glider example
UpperCAmelCase_ = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
UpperCAmelCase_ = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def SCREAMING_SNAKE_CASE ( a_ : list[list[int]] ):
__a = []
for i in range(len(a_ ) ):
__a = []
for j in range(len(cells[i] ) ):
# Get the number of live neighbours
__a = 0
if i > 0 and j > 0:
neighbour_count += cells[i - 1][j - 1]
if i > 0:
neighbour_count += cells[i - 1][j]
if i > 0 and j < len(cells[i] ) - 1:
neighbour_count += cells[i - 1][j + 1]
if j > 0:
neighbour_count += cells[i][j - 1]
if j < len(cells[i] ) - 1:
neighbour_count += cells[i][j + 1]
if i < len(a_ ) - 1 and j > 0:
neighbour_count += cells[i + 1][j - 1]
if i < len(a_ ) - 1:
neighbour_count += cells[i + 1][j]
if i < len(a_ ) - 1 and j < len(cells[i] ) - 1:
neighbour_count += cells[i + 1][j + 1]
# Rules of the game of life (excerpt from Wikipedia):
# 1. Any live cell with two or three live neighbours survives.
# 2. Any dead cell with three live neighbours becomes a live cell.
# 3. All other live cells die in the next generation.
# Similarly, all other dead cells stay dead.
__a = cells[i][j] == 1
if (
(alive and 2 <= neighbour_count <= 3)
or not alive
and neighbour_count == 3
):
next_generation_row.append(1 )
else:
next_generation_row.append(0 )
next_generation.append(a_ )
return next_generation
def SCREAMING_SNAKE_CASE ( a_ : list[list[int]] , a_ : int ):
__a = []
for _ in range(a_ ):
# Create output image
__a = Image.new('RGB' , (len(cells[0] ), len(a_ )) )
__a = img.load()
# Save cells to image
for x in range(len(a_ ) ):
for y in range(len(cells[0] ) ):
__a = 255 - cells[y][x] * 255
__a = (colour, colour, colour)
# Save image
images.append(a_ )
__a = new_generation(a_ )
return images
if __name__ == "__main__":
UpperCAmelCase_ = generate_images(GLIDER, 16)
images[0].save("out.gif", save_all=True, append_images=images[1:])
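# --- Illustrative sketch (not part of the sample above) ---
# A quick check of the update rule on the blinker oscillator: two updates
# reproduce the starting grid. `new_generation` is assumed to be the name of
# the update function defined above, as the sample's own internal call suggests.
blinker = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
step1 = new_generation(blinker)
step2 = new_generation(step1)
assert step1 == [[0, 0, 0], [1, 1, 1], [0, 0, 0]]
assert step2 == blinker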
| 490
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
"configuration_roformer": ["ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "RoFormerConfig", "RoFormerOnnxConfig"],
"tokenization_roformer": ["RoFormerTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"RoFormerForCausalLM",
"RoFormerForMaskedLM",
"RoFormerForMultipleChoice",
"RoFormerForQuestionAnswering",
"RoFormerForSequenceClassification",
"RoFormerForTokenClassification",
"RoFormerLayer",
"RoFormerModel",
"RoFormerPreTrainedModel",
"load_tf_weights_in_roformer",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRoFormerForCausalLM",
"TFRoFormerForMaskedLM",
"TFRoFormerForMultipleChoice",
"TFRoFormerForQuestionAnswering",
"TFRoFormerForSequenceClassification",
"TFRoFormerForTokenClassification",
"TFRoFormerLayer",
"TFRoFormerModel",
"TFRoFormerPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
"FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxRoFormerForMaskedLM",
"FlaxRoFormerForMultipleChoice",
"FlaxRoFormerForQuestionAnswering",
"FlaxRoFormerForSequenceClassification",
"FlaxRoFormerForTokenClassification",
"FlaxRoFormerModel",
"FlaxRoFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
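# --- Illustrative sketch (not part of the sample above) ---
# _LazyModule replaces the module object in sys.modules so that attribute
# access triggers the real import only on first use. A rough standalone
# analogue of that mechanism (not the transformers implementation itself):
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._targets = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(self._targets[attr])  # real import happens here
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value

lazy_json = LazyModule("lazy_json", {"json": ["dumps", "loads"]})
print(lazy_json.dumps({"ok": True}))  # json is imported only at this point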
| 490
| 1
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A : List[str] = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase( __snake_case ):
'''simple docstring'''
__magic_name__ = ['pixel_values']
def __init__( self , snake_case_ = True , snake_case_ = None , snake_case_ = PILImageResampling.BICUBIC , snake_case_ = True , snake_case_ = None , snake_case_ = True , snake_case_ = 1 / 255 , snake_case_ = True , snake_case_ = None , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
super().__init__(**snake_case_ )
_A = size if size is not None else {'shortest_edge': 224}
_A = get_size_dict(snake_case_ , default_to_square=snake_case_ )
_A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_A = get_size_dict(snake_case_ , default_to_square=snake_case_ , param_name='crop_size' )
_A = do_resize
_A = size
_A = resample
_A = do_center_crop
_A = crop_size
_A = do_rescale
_A = rescale_factor
_A = do_normalize
_A = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
_A = image_std if image_std is not None else OPENAI_CLIP_STD
_A = do_convert_rgb
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ = PILImageResampling.BICUBIC , snake_case_ = None , **snake_case_ , ):
_A = get_size_dict(snake_case_ , default_to_square=snake_case_ )
if "shortest_edge" not in size:
raise ValueError(F"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}" )
_A = get_resize_output_image_size(snake_case_ , size=size['shortest_edge'] , default_to_square=snake_case_ )
return resize(snake_case_ , size=snake_case_ , resample=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
_A = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F"The `size` parameter must contain the keys (height, width). Got {size.keys()}" )
return center_crop(snake_case_ , size=(size['height'], size['width']) , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return rescale(snake_case_ , scale=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ = None , **snake_case_ , ):
return normalize(snake_case_ , mean=snake_case_ , std=snake_case_ , data_format=snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = ChannelDimension.FIRST , **snake_case_ , ):
_A = do_resize if do_resize is not None else self.do_resize
_A = size if size is not None else self.size
_A = get_size_dict(snake_case_ , param_name='size' , default_to_square=snake_case_ )
_A = resample if resample is not None else self.resample
_A = do_center_crop if do_center_crop is not None else self.do_center_crop
_A = crop_size if crop_size is not None else self.crop_size
_A = get_size_dict(snake_case_ , param_name='crop_size' , default_to_square=snake_case_ )
_A = do_rescale if do_rescale is not None else self.do_rescale
_A = rescale_factor if rescale_factor is not None else self.rescale_factor
_A = do_normalize if do_normalize is not None else self.do_normalize
_A = image_mean if image_mean is not None else self.image_mean
_A = image_std if image_std is not None else self.image_std
_A = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
_A = make_list_of_images(snake_case_ )
if not valid_images(snake_case_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
_A = [convert_to_rgb(snake_case_ ) for image in images]
# All transformations expect numpy arrays.
_A = [to_numpy_array(snake_case_ ) for image in images]
if do_resize:
_A = [self.resize(image=snake_case_ , size=snake_case_ , resample=snake_case_ ) for image in images]
if do_center_crop:
_A = [self.center_crop(image=snake_case_ , size=snake_case_ ) for image in images]
if do_rescale:
_A = [self.rescale(image=snake_case_ , scale=snake_case_ ) for image in images]
if do_normalize:
_A = [self.normalize(image=snake_case_ , mean=snake_case_ , std=snake_case_ ) for image in images]
_A = [to_channel_dimension_format(snake_case_ , snake_case_ ) for image in images]
_A = {'pixel_values': images}
return BatchFeature(data=snake_case_ , tensor_type=snake_case_ )
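# --- Illustrative sketch (not part of the sample above) ---
# The preprocessing above boils down to: resize shortest edge to 224,
# center-crop 224x224, rescale by 1/255, then normalize with the OpenAI CLIP
# statistics (the same values the sample imports as OPENAI_CLIP_MEAN/STD).
# The normalization arithmetic on its own:
import numpy as np

OPENAI_CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
OPENAI_CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])
pixels = np.full((224, 224, 3), 0.5, dtype=np.float64)  # already rescaled to [0, 1]
normalized = (pixels - OPENAI_CLIP_MEAN) / OPENAI_CLIP_STD
print(normalized[0, 0])  # roughly [0.069, 0.161, 0.333]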
| 27
|
'''simple docstring'''
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE( A__ , A__ , A__ ):
"""simple docstring"""
lowerCamelCase__ = [r"""h\.\d+\.attn\.bias""", r"""h\.\d+\.attn\.masked_bias"""]
@register_to_config
def __init__( self : Union[str, Any] , __snake_case : int , __snake_case : int , __snake_case : Optional[int] = None , __snake_case : int = 50257 , __snake_case : int = 1024 , __snake_case : int = 768 , __snake_case : int = 12 , __snake_case : int = 12 , __snake_case : Optional[int] = None , __snake_case : str = "gelu_new" , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 0.1 , __snake_case : float = 1E-5 , __snake_case : float = 0.02 , __snake_case : bool = True , __snake_case : bool = True , __snake_case : bool = False , __snake_case : bool = False , ) -> Tuple:
super().__init__()
UpperCAmelCase : Optional[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"""
                F""" `n_embd`: {n_embd} are not equal.""" )
UpperCAmelCase : List[str] = prefix_inner_dim
UpperCAmelCase : Tuple = prefix_hidden_dim
UpperCAmelCase : List[Any] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
UpperCAmelCase : List[str] = (
nn.Linear(self.prefix_hidden_dim , __snake_case ) if self.prefix_hidden_dim is not None else nn.Identity()
)
        UpperCAmelCase : Dict = GPT2Config(
vocab_size=__snake_case , n_positions=__snake_case , n_embd=__snake_case , n_layer=__snake_case , n_head=__snake_case , n_inner=__snake_case , activation_function=__snake_case , resid_pdrop=__snake_case , embd_pdrop=__snake_case , attn_pdrop=__snake_case , layer_norm_epsilon=__snake_case , initializer_range=__snake_case , scale_attn_weights=__snake_case , use_cache=__snake_case , scale_attn_by_inverse_layer_idx=__snake_case , reorder_and_upcast_attn=__snake_case , )
        UpperCAmelCase : List[Any] = GPT2LMHeadModel(__snake_case )
def A ( self : Dict , __snake_case : torch.Tensor , __snake_case : torch.Tensor , __snake_case : Optional[torch.Tensor] = None , __snake_case : Optional[torch.Tensor] = None , ) -> Any:
UpperCAmelCase : Optional[int] = self.transformer.transformer.wte(__snake_case )
UpperCAmelCase : Union[str, Any] = self.encode_prefix(__snake_case )
UpperCAmelCase : List[str] = self.decode_prefix(__snake_case )
UpperCAmelCase : int = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
UpperCAmelCase : List[Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
UpperCAmelCase : int = torch.cat((dummy_token, input_ids) , dim=1 )
UpperCAmelCase : str = self.transformer(inputs_embeds=__snake_case , labels=__snake_case , attention_mask=__snake_case )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def A ( self : str , __snake_case : int , __snake_case : torch.device ) -> torch.Tensor:
        return torch.zeros(__snake_case , self.prefix_length , dtype=torch.int64 , device=__snake_case )
def A ( self : Optional[Any] , __snake_case : Optional[Any] ) -> Any:
return self.encode_prefix(__snake_case )
@torch.no_grad()
def A ( self : Dict , __snake_case : int , __snake_case : Any , __snake_case : Dict ) -> Optional[Any]:
UpperCAmelCase : int = torch.split(__snake_case , 1 , dim=0 )
UpperCAmelCase : str = []
UpperCAmelCase : List[Any] = []
for feature in features:
UpperCAmelCase : Union[str, Any] = self.decode_prefix(feature.to(__snake_case ) ) # back to the clip feature
# Only support beam search for now
UpperCAmelCase , UpperCAmelCase : Tuple = self.generate_beam(
input_embeds=__snake_case , device=__snake_case , eos_token_id=__snake_case )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
UpperCAmelCase : Optional[Any] = torch.stack(__snake_case )
UpperCAmelCase : Optional[Any] = torch.stack(__snake_case )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def A ( self : Any , __snake_case : List[str]=None , __snake_case : Optional[int]=None , __snake_case : str=None , __snake_case : int = 5 , __snake_case : int = 67 , __snake_case : float = 1.0 , __snake_case : Optional[int] = None , ) -> Optional[Any]:
UpperCAmelCase : str = eos_token_id
UpperCAmelCase : Optional[int] = None
UpperCAmelCase : Optional[Any] = None
UpperCAmelCase : Optional[int] = torch.ones(__snake_case , device=__snake_case , dtype=torch.int )
UpperCAmelCase : Union[str, Any] = torch.zeros(__snake_case , device=__snake_case , dtype=torch.bool )
if input_embeds is not None:
UpperCAmelCase : str = input_embeds
else:
UpperCAmelCase : Union[str, Any] = self.transformer.transformer.wte(__snake_case )
for i in range(__snake_case ):
UpperCAmelCase : Optional[int] = self.transformer(inputs_embeds=__snake_case )
UpperCAmelCase : Optional[Any] = outputs.logits
UpperCAmelCase : List[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
UpperCAmelCase : Any = logits.softmax(-1 ).log()
if scores is None:
UpperCAmelCase , UpperCAmelCase : Tuple = logits.topk(__snake_case , -1 )
UpperCAmelCase : Dict = generated.expand(__snake_case , *generated.shape[1:] )
UpperCAmelCase , UpperCAmelCase : Optional[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
UpperCAmelCase : Optional[Any] = next_tokens
else:
UpperCAmelCase : List[str] = tokens.expand(__snake_case , *tokens.shape[1:] )
UpperCAmelCase : Any = torch.cat((tokens, next_tokens) , dim=1 )
else:
UpperCAmelCase : Any = -float(np.inf )
UpperCAmelCase : Optional[Any] = 0
UpperCAmelCase : Optional[int] = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
UpperCAmelCase : List[str] = scores_sum / seq_lengths[:, None]
UpperCAmelCase , UpperCAmelCase : Optional[Any] = scores_sum_average.view(-1 ).topk(__snake_case , -1 )
UpperCAmelCase : str = next_tokens // scores_sum.shape[1]
UpperCAmelCase : List[Any] = seq_lengths[next_tokens_source]
UpperCAmelCase : str = next_tokens % scores_sum.shape[1]
UpperCAmelCase : Optional[Any] = next_tokens.unsqueeze(1 )
UpperCAmelCase : Any = tokens[next_tokens_source]
UpperCAmelCase : Union[str, Any] = torch.cat((tokens, next_tokens) , dim=1 )
UpperCAmelCase : int = generated[next_tokens_source]
UpperCAmelCase : Any = scores_sum_average * seq_lengths
UpperCAmelCase : List[str] = is_stopped[next_tokens_source]
UpperCAmelCase : int = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
UpperCAmelCase : Optional[int] = torch.cat((generated, next_token_embed) , dim=1 )
UpperCAmelCase : List[Any] = is_stopped + next_tokens.eq(__snake_case ).squeeze()
if is_stopped.all():
break
UpperCAmelCase : str = scores / seq_lengths
UpperCAmelCase : Any = scores.argsort(descending=__snake_case )
# tokens tensors are already padded to max_seq_length
UpperCAmelCase : Union[str, Any] = [tokens[i] for i in order]
UpperCAmelCase : Optional[Any] = torch.stack(__snake_case , dim=0 )
UpperCAmelCase : Dict = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
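# --- Illustrative sketch (not part of the sample above) ---
# The model above projects CLIP features into GPT-2's embedding space and
# prepends them as a "prefix" the language model continues from. The shape
# bookkeeping, with illustrative sizes (all dimensions below are assumptions):
import torch
from torch import nn

clip_dim, hidden, n_embd, prefix_length = 512, 640, 768, 10
encode_prefix = nn.Linear(clip_dim, hidden)    # role of the sample's encode_prefix
decode_prefix = nn.Linear(hidden, n_embd)      # role of the sample's decode_prefix
clip_feature = torch.randn(1, prefix_length, clip_dim)
prefix_embeds = decode_prefix(encode_prefix(clip_feature))
print(prefix_embeds.shape)  # torch.Size([1, 10, 768]); concatenated before token embeddings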
| 127
| 0
|
_lowercase : str =[
9_99,
8_00,
7_99,
6_00,
5_99,
5_00,
4_00,
3_99,
3_77,
3_55,
3_33,
3_11,
2_88,
2_66,
2_44,
2_22,
2_00,
1_99,
1_77,
1_55,
1_33,
1_11,
88,
66,
44,
22,
0,
]
_lowercase : Union[str, Any] =[
9_99,
9_76,
9_52,
9_28,
9_05,
8_82,
8_58,
8_57,
8_10,
7_62,
7_15,
7_14,
5_72,
4_29,
4_28,
2_86,
2_85,
2_38,
1_90,
1_43,
1_42,
1_18,
95,
71,
47,
24,
0,
]
_lowercase : Dict =[
9_99,
9_88,
9_77,
9_66,
9_55,
9_44,
9_33,
9_22,
9_11,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_50,
3_00,
2_99,
2_66,
2_33,
2_00,
1_99,
1_79,
1_59,
1_40,
1_20,
1_00,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
_lowercase : str =[
9_99,
9_95,
9_92,
9_89,
9_85,
9_81,
9_78,
9_75,
9_71,
9_67,
9_64,
9_61,
9_57,
9_56,
9_51,
9_47,
9_42,
9_37,
9_33,
9_28,
9_23,
9_19,
9_14,
9_13,
9_08,
9_03,
8_97,
8_92,
8_87,
8_81,
8_76,
8_71,
8_70,
8_64,
8_58,
8_52,
8_46,
8_40,
8_34,
8_28,
8_27,
8_20,
8_13,
8_06,
7_99,
7_92,
7_85,
7_84,
7_77,
7_70,
7_63,
7_56,
7_49,
7_42,
7_41,
7_33,
7_24,
7_16,
7_07,
6_99,
6_98,
6_88,
6_77,
6_66,
6_56,
6_55,
6_45,
6_34,
6_23,
6_13,
6_12,
5_98,
5_84,
5_70,
5_69,
5_55,
5_41,
5_27,
5_26,
5_05,
4_84,
4_83,
4_62,
4_40,
4_39,
3_96,
3_95,
3_52,
3_51,
3_08,
3_07,
2_64,
2_63,
2_20,
2_19,
1_76,
1_32,
88,
44,
0,
]
_lowercase : List[Any] =[
9_99,
9_97,
9_95,
9_92,
9_90,
9_88,
9_86,
9_84,
9_81,
9_79,
9_77,
9_75,
9_72,
9_70,
9_68,
9_66,
9_64,
9_61,
9_59,
9_57,
9_56,
9_54,
9_51,
9_49,
9_46,
9_44,
9_41,
9_39,
9_36,
9_34,
9_31,
9_29,
9_26,
9_24,
9_21,
9_19,
9_16,
9_14,
9_13,
9_10,
9_07,
9_05,
9_02,
8_99,
8_96,
8_93,
8_91,
8_88,
8_85,
8_82,
8_79,
8_77,
8_74,
8_71,
8_70,
8_67,
8_64,
8_61,
8_58,
8_55,
8_52,
8_49,
8_46,
8_43,
8_40,
8_37,
8_34,
8_31,
8_28,
8_27,
8_24,
8_21,
8_17,
8_14,
8_11,
8_08,
8_04,
8_01,
7_98,
7_95,
7_91,
7_88,
7_85,
7_84,
7_80,
7_77,
7_74,
7_70,
7_66,
7_63,
7_60,
7_56,
7_52,
7_49,
7_46,
7_42,
7_41,
7_37,
7_33,
7_30,
7_26,
7_22,
7_18,
7_14,
7_10,
7_07,
7_03,
6_99,
6_98,
6_94,
6_90,
6_85,
6_81,
6_77,
6_73,
6_69,
6_64,
6_60,
6_56,
6_55,
6_50,
6_46,
6_41,
6_36,
6_32,
6_27,
6_22,
6_18,
6_13,
6_12,
6_07,
6_02,
5_96,
5_91,
5_86,
5_80,
5_75,
5_70,
5_69,
5_63,
5_57,
5_51,
5_45,
5_39,
5_33,
5_27,
5_26,
5_19,
5_12,
5_05,
4_98,
4_91,
4_84,
4_83,
4_74,
4_66,
4_57,
4_49,
4_40,
4_39,
4_28,
4_18,
4_07,
3_96,
3_95,
3_81,
3_66,
3_52,
3_51,
3_30,
3_08,
3_07,
2_86,
2_64,
2_63,
2_42,
2_20,
2_19,
1_76,
1_75,
1_32,
1_31,
88,
44,
0,
]
_lowercase : List[Any] =[
9_99,
9_91,
9_82,
9_74,
9_66,
9_58,
9_50,
9_41,
9_33,
9_25,
9_16,
9_08,
9_00,
8_99,
8_74,
8_50,
8_25,
8_00,
7_99,
7_00,
6_00,
5_00,
4_00,
3_00,
2_00,
1_00,
0,
]
_lowercase : List[Any] =[
9_99,
9_92,
9_85,
9_78,
9_71,
9_64,
9_57,
9_49,
9_42,
9_35,
9_28,
9_21,
9_14,
9_07,
9_00,
8_99,
8_79,
8_59,
8_40,
8_20,
8_00,
7_99,
7_66,
7_33,
7_00,
6_99,
6_50,
6_00,
5_99,
5_00,
4_99,
4_00,
3_99,
3_00,
2_99,
2_00,
1_99,
1_00,
99,
0,
]
_lowercase : List[Any] =[
9_99,
9_96,
9_92,
9_89,
9_85,
9_82,
9_79,
9_75,
9_72,
9_68,
9_65,
9_61,
9_58,
9_55,
9_51,
9_48,
9_44,
9_41,
9_38,
9_34,
9_31,
9_27,
9_24,
9_20,
9_17,
9_14,
9_10,
9_07,
9_03,
9_00,
8_99,
8_91,
8_84,
8_76,
8_69,
8_61,
8_53,
8_46,
8_38,
8_30,
8_23,
8_15,
8_08,
8_00,
7_99,
7_88,
7_77,
7_66,
7_55,
7_44,
7_33,
7_22,
7_11,
7_00,
6_99,
6_88,
6_77,
6_66,
6_55,
6_44,
6_33,
6_22,
6_11,
6_00,
5_99,
5_85,
5_71,
5_57,
5_42,
5_28,
5_14,
5_00,
4_99,
4_85,
4_71,
4_57,
4_42,
4_28,
4_14,
4_00,
3_99,
3_79,
3_59,
3_40,
3_20,
3_00,
2_99,
2_79,
2_59,
2_40,
2_20,
2_00,
1_99,
1_66,
1_33,
1_00,
99,
66,
33,
0,
]
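# --- Illustrative sketch (not part of the sample above) ---
# Each list above is a hand-picked denoising schedule: timestep indices walked
# from 999 down to 0. The shared invariants, checked on the 27-step schedule
# that opens this sample:
fast27 = [999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288,
          266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0]
assert fast27[0] == 999 and fast27[-1] == 0
assert all(a > b for a, b in zip(fast27, fast27[1:]))  # strictly decreasing
# A scheduler supporting custom spacing would consume such a list, e.g.
# scheduler.set_timesteps(timesteps=fast27)  # hypothetical call; check your scheduler's API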
| 709
|
import logging
import os
import quant_trainer
import torch
from torch.utils.data import DataLoader
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput
_lowercase : int =logging.getLogger(__name__)
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCAmelCase_ ( A_ ):
'''simple docstring'''
def __init__( self , *lowerCamelCase , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , **lowerCamelCase ):
'''simple docstring'''
super().__init__(*lowerCamelCase , **lowerCamelCase )
a__ = eval_examples
a__ = post_process_function
a__ = quant_trainer_args
a__ = 128 # default number of calibration samples
def _A ( self , lowerCamelCase=None ):
'''simple docstring'''
if calib_dataset is None and self.calib_dataset is None:
            raise ValueError("""Trainer: calibration requires a calib_dataset.""" )
a__ = calib_dataset if calib_dataset is not None else self.calib_dataset
a__ = self._remove_unused_columns(lowerCamelCase , description="""Calibration""" )
return DataLoader(
lowerCamelCase , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=lowerCamelCase , )
def _A ( self , lowerCamelCase=None ):
'''simple docstring'''
a__ = self.train_dataset if calib_dataset is None else calib_dataset
a__ = self.get_calib_dataloader(lowerCamelCase )
a__ = self.model
quant_trainer.configure_model(lowerCamelCase , self.quant_trainer_args , calib=lowerCamelCase )
model.eval()
quant_trainer.enable_calibration(lowerCamelCase )
logger.info("""***** Running calibration *****""" )
logger.info(f' Num examples = {self.calib_num}' )
logger.info(f' Batch size = {calib_dataloader.batch_size}' )
for step, inputs in enumerate(lowerCamelCase ):
# Prediction step
a__ , a__ , a__ = self.prediction_step(lowerCamelCase , lowerCamelCase , prediction_loss_only=lowerCamelCase )
if (step + 1) * calib_dataloader.batch_size >= self.calib_num:
break
quant_trainer.finish_calibration(lowerCamelCase , self.quant_trainer_args )
a__ = model
def _A ( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase = "eval" ):
'''simple docstring'''
a__ = self.eval_dataset if eval_dataset is None else eval_dataset
a__ = self.get_eval_dataloader(lowerCamelCase )
a__ = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a__ = self.compute_metrics
a__ = None
a__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
a__ = eval_loop(
lowerCamelCase , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , )
finally:
a__ = compute_metrics
if self.post_process_function is not None and self.compute_metrics is not None:
a__ = self.post_process_function(lowerCamelCase , lowerCamelCase , output.predictions )
a__ = self.compute_metrics(lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
a__ = metrics.pop(lowerCamelCase )
self.log(lowerCamelCase )
else:
a__ = {}
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a__ = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowerCamelCase )
return metrics
def _A ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=None , lowerCamelCase = "test" ):
'''simple docstring'''
a__ = self.get_test_dataloader(lowerCamelCase )
# Temporarily disable metric computation, we will do it in the loop here.
a__ = self.compute_metrics
a__ = None
a__ = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
a__ = eval_loop(
lowerCamelCase , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowerCamelCase , )
finally:
a__ = compute_metrics
if self.post_process_function is None or self.compute_metrics is None:
return output
a__ = self.post_process_function(lowerCamelCase , lowerCamelCase , output.predictions , """predict""" )
a__ = self.compute_metrics(lowerCamelCase )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
a__ = metrics.pop(lowerCamelCase )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowerCamelCase )
def _A ( self , lowerCamelCase="./" ):
'''simple docstring'''
a__ = self.eval_dataset
a__ = self.get_eval_dataloader(lowerCamelCase )
a__ = next(iter(lowerCamelCase ) )
# saving device - to make it consistent
a__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
# convert to tuple
a__ = tuple(v.to(lowerCamelCase ) for k, v in batch.items() )
logger.info("""Converting model to be onnx compatible""" )
from pytorch_quantization.nn import TensorQuantizer
a__ = True
a__ = self.model.to(lowerCamelCase )
model.eval()
model.float()
a__ = model.module if hasattr(lowerCamelCase , """module""" ) else model
quant_trainer.configure_model(lowerCamelCase , self.quant_trainer_args )
a__ = os.path.join(lowerCamelCase , """model.onnx""" )
logger.info(f'exporting model to {output_model_file}' )
a__ = {0: """batch_size""", 1: """seq_len"""}
torch.onnx.export(
lowerCamelCase , lowerCamelCase , lowerCamelCase , export_params=lowerCamelCase , opset_version=13 , do_constant_folding=lowerCamelCase , input_names=["""input_ids""", """attention_mask""", """token_type_ids"""] , output_names=["""output_start_logits""", """output_end_logits"""] , dynamic_axes={
"""input_ids""": axes,
"""attention_mask""": axes,
"""token_type_ids""": axes,
"""output_start_logits""": axes,
"""output_end_logits""": axes,
} , verbose=lowerCamelCase , )
logger.info("""onnx export finished""" )
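# --- Illustrative sketch (not part of the sample above) ---
# The calibrate() method above runs a few forward passes with quantizer range
# recording enabled, then freezes the collected ranges into scales. Its
# control flow, stripped of the quant_trainer / pytorch_quantization
# specifics (enable/finish stand in for enable_calibration/finish_calibration):
import torch

def calibrate(model, dataloader, calib_num, enable, finish):
    model.eval()                       # no dropout; calibration is inference-only
    enable(model)                      # quantizers start recording activation ranges
    seen = 0
    with torch.no_grad():
        for batch in dataloader:
            model(**batch)             # outputs are discarded; only the ranges matter
            seen += len(next(iter(batch.values())))
            if seen >= calib_num:
                break
    finish(model)                      # compute amax/scales from the recorded ranges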
| 412
| 0
|
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class _lowerCamelCase( enum.Enum ):
lowercase_ : int = 0
lowercase_ : str = 1
lowercase_ : Optional[Any] = 2
@add_end_docstrings(_a )
class _lowerCamelCase( _a ):
lowercase_ : List[str] = """
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
"""
def __init__( self, *lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
super().__init__(*lowerCamelCase, **lowerCamelCase)
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING)
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
_lowercase : int = None
if self.model.config.prefix is not None:
_lowercase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
_lowercase : str = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
_lowercase , _lowercase , _lowercase : Optional[Any] = self._sanitize_parameters(prefix=lowerCamelCase, **self._forward_params)
_lowercase : Dict = {**self._preprocess_params, **preprocess_params}
_lowercase : str = {**self._forward_params, **forward_params}
def UpperCamelCase ( self, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, lowerCamelCase=None, **lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : List[str] = {}
if prefix is not None:
_lowercase : str = prefix
if prefix:
_lowercase : Dict = self.tokenizer(
lowerCamelCase, padding=lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=self.framework)
_lowercase : Any = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
F'''{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected'''
' [None, \'hole\']')
_lowercase : str = handle_long_generation
preprocess_params.update(lowerCamelCase)
_lowercase : Optional[int] = generate_kwargs
_lowercase : Union[str, Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`')
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`')
_lowercase : Union[str, Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`')
_lowercase : List[str] = ReturnType.TENSORS
if return_type is not None:
_lowercase : Tuple = return_type
if clean_up_tokenization_spaces is not None:
_lowercase : Optional[Any] = clean_up_tokenization_spaces
if stop_sequence is not None:
_lowercase : List[Any] = self.tokenizer.encode(lowerCamelCase, add_special_tokens=lowerCamelCase)
if len(lowerCamelCase) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.')
_lowercase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase ( self, *lowerCamelCase, **lowerCamelCase) -> Tuple:
"""simple docstring"""
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True})
return super()._parse_and_tokenize(*lowerCamelCase, **lowerCamelCase)
def __call__( self, lowerCamelCase, **lowerCamelCase) -> int:
"""simple docstring"""
return super().__call__(lowerCamelCase, **lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase="", lowerCamelCase=None, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : List[str] = self.tokenizer(
prefix + prompt_text, padding=lowerCamelCase, add_special_tokens=lowerCamelCase, return_tensors=self.framework)
_lowercase : Optional[int] = prompt_text
if handle_long_generation == "hole":
_lowercase : Dict = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
_lowercase : Any = generate_kwargs['max_new_tokens']
else:
_lowercase : Tuple = generate_kwargs.get('max_length', self.model.config.max_length) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected')
if cur_len + new_tokens > self.tokenizer.model_max_length:
_lowercase : Optional[Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        " model's max length")
_lowercase : Tuple = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
_lowercase : Optional[int] = inputs['attention_mask'][:, -keep_length:]
return inputs
def UpperCamelCase ( self, lowerCamelCase, **lowerCamelCase) -> Dict:
"""simple docstring"""
_lowercase : Any = model_inputs['input_ids']
_lowercase : str = model_inputs.get('attention_mask', lowerCamelCase)
# Allow empty prompts
if input_ids.shape[1] == 0:
_lowercase : List[str] = None
_lowercase : int = None
_lowercase : str = 1
else:
_lowercase : Dict = input_ids.shape[0]
_lowercase : int = model_inputs.pop('prompt_text')
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
_lowercase : Optional[int] = generate_kwargs.pop('prefix_length', 0)
if prefix_length > 0:
_lowercase : int = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
_lowercase : Union[str, Any] = generate_kwargs.get('max_length') or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
_lowercase : str = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
_lowercase : Dict = self.model.generate(input_ids=lowerCamelCase, attention_mask=lowerCamelCase, **lowerCamelCase)
_lowercase : int = generated_sequence.shape[0]
if self.framework == "pt":
_lowercase : Optional[Any] = generated_sequence.reshape(lowerCamelCase, out_b // in_b, *generated_sequence.shape[1:])
elif self.framework == "tf":
_lowercase : Optional[int] = tf.reshape(lowerCamelCase, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=ReturnType.FULL_TEXT, lowerCamelCase=True) -> List[Any]:
"""simple docstring"""
_lowercase : Tuple = model_outputs['generated_sequence'][0]
_lowercase : str = model_outputs['input_ids']
_lowercase : Any = model_outputs['prompt_text']
_lowercase : str = generated_sequence.numpy().tolist()
_lowercase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
_lowercase : Dict = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
_lowercase : Union[str, Any] = self.tokenizer.decode(
lowerCamelCase, skip_special_tokens=lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase, )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
_lowercase : Union[str, Any] = 0
else:
_lowercase : Dict = len(
self.tokenizer.decode(
input_ids[0], skip_special_tokens=lowerCamelCase, clean_up_tokenization_spaces=lowerCamelCase, ))
if return_type == ReturnType.FULL_TEXT:
_lowercase : int = prompt_text + text[prompt_length:]
else:
_lowercase : List[str] = text[prompt_length:]
_lowercase : Dict = {'generated_text': all_text}
records.append(lowerCamelCase)
return records
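# --- Illustrative sketch (not part of the sample above) ---
# The class above backs the public text-generation pipeline; typical use via
# the transformers API (the model choice here is just an example, and the
# first run downloads weights):
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)
print(out[0]["generated_text"])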
| 89
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
__magic_name__ = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
__magic_name__ = {
'facebook/bart-base': 1_024,
'facebook/bart-large': 1_024,
'facebook/bart-large-mnli': 1_024,
'facebook/bart-large-cnn': 1_024,
'facebook/bart-large-xsum': 1_024,
'yjernite/bart_eli5': 1_024,
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
a_ = BartTokenizer
def __init__( self : str ,_a : Any=None ,_a : Optional[int]=None ,_a : int=None ,_a : Optional[int]="replace" ,_a : Dict="<s>" ,_a : Optional[Any]="</s>" ,_a : Dict="</s>" ,_a : Tuple="<s>" ,_a : Optional[Any]="<unk>" ,_a : List[str]="<pad>" ,_a : int="<mask>" ,_a : str=False ,_a : List[str]=True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(
_a ,_a ,tokenizer_file=_a ,errors=_a ,bos_token=_a ,eos_token=_a ,sep_token=_a ,cls_token=_a ,unk_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,trim_offsets=_a ,**_a ,)
A_ : Dict = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : List[str] = getattr(_a ,pre_tok_state.pop("""type""" ) )
A_ : Optional[int] = add_prefix_space
A_ : int = pre_tok_class(**_a )
A_ : str = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
A_ : str = """post_processor"""
A_ : List[Any] = getattr(self.backend_tokenizer ,_a ,_a )
if tokenizer_component_instance:
A_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
A_ : Tuple = tuple(state["""sep"""] )
if "cls" in state:
A_ : Tuple = tuple(state["""cls"""] )
A_ : List[str] = False
if state.get("""add_prefix_space""" ,_a ) != add_prefix_space:
A_ : Dict = add_prefix_space
A_ : Any = True
if state.get("""trim_offsets""" ,_a ) != trim_offsets:
A_ : Union[str, Any] = trim_offsets
A_ : List[Any] = True
if changes_to_apply:
A_ : Optional[int] = getattr(_a ,state.pop("""type""" ) )
A_ : Tuple = component_class(**_a )
setattr(self.backend_tokenizer ,_a ,_a )
@property
def _a ( self : List[str] ):
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _a ( self : Union[str, Any] ,_a : Any ):
'''simple docstring'''
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else value
A_ : List[Any] = value
def _a ( self : str ,*_a : str ,**_a : Optional[int] ):
'''simple docstring'''
A_ : Optional[Any] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_a ,**_a )
def _a ( self : str ,*_a : List[Any] ,**_a : str ):
'''simple docstring'''
A_ : List[str] = kwargs.get("""is_split_into_words""" ,_a )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_a ,**_a )
def _a ( self : Optional[int] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
A_ : str = self._tokenizer.model.save(_a ,name=_a )
return tuple(_a )
def _a ( self : str ,_a : Optional[int] ,_a : int=None ):
'''simple docstring'''
A_ : Optional[Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _a ( self : Optional[int] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Dict = [self.sep_token_id]
A_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
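# --- Illustrative sketch (not part of the sample above) ---
# The two methods above lay out BART special tokens as <s> A </s> for a
# single sequence and <s> A </s></s> B </s> for a pair. The same logic,
# standalone (0 and 2 are BART's <s>/</s> ids):
bos, eos = 0, 2

def build_inputs(token_ids_0, token_ids_1=None):
    output = [bos] + token_ids_0 + [eos]
    if token_ids_1 is None:
        return output
    return output + [eos] + token_ids_1 + [eos]

assert build_inputs([100, 101]) == [0, 100, 101, 2]
assert build_inputs([100], [200]) == [0, 100, 2, 2, 200, 2]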
| 665
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def test_input_types(self) -> None:
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self) -> None:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self) -> None:
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self) -> None:
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
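# --- Illustrative usage (not part of the test class above) ---
# A minimal sketch of constrained beam search with DisjunctiveConstraint; the
# checkpoint name, prompt and word list are assumptions chosen for illustration:
#
#   from transformers import AutoModelForCausalLM, AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("gpt2")  # hypothetical choice
#   model = AutoModelForCausalLM.from_pretrained("gpt2")
#   flexible_words = ["screaming", "screams"]
#   word_ids = tokenizer(flexible_words, add_special_tokens=False).input_ids
#   constraint = DisjunctiveConstraint(word_ids)
#   inputs = tokenizer("The crowd was", return_tensors="pt")
#   outputs = model.generate(**inputs, constraints=[constraint], num_beams=4)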
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the inputs in the way they appear in forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
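# A minimal sketch of how the ONNX config above produces ordered dummy inputs
# (the tokenizer checkpoint is an assumption chosen for illustration):
#
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-j-6B")
#   onnx_config = GPTJOnnxConfig(GPTJConfig(), task="default", use_past=True)
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   list(dummy_inputs)  # ['input_ids', 'past_key_values', 'attention_mask']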
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_config(model_name):
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    conv_layer = "std_conv" if "bit" in model_name else False

    # note that when using BiT as backbone for ViT-hybrid checkpoints,
    # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
    # config.conv_layer = "std_conv_same"
    config = BitConfig(
        conv_layer=conv_layer,
        num_labels=1000,
        id2label=id2label,
        label2id=label2id,
    )

    return config
def rename_key(name):
    if "stem.conv" in name:
        name = name.replace("stem.conv", "bit.embedder.convolution")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    if "head.fc" in name:
        name = name.replace("head.fc", "classifier.1")
    if name.startswith("norm"):
        name = "bit." + name
    if "bit" not in name and "classifier" not in name:
        name = "bit.encoder." + name
    return name
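# For example, tracing the replacement rules above:
#   rename_key("stem.conv.weight")      -> "bit.embedder.convolution.weight"
#   rename_key("blocks.0.conv1.weight") -> "bit.encoder.layers.0.conv1.weight"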
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_bit_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a timm BiT checkpoint into the HuggingFace BiT structure."""
    config = get_config(model_name)

    # load original model from timm
    timm_model = create_model(model_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model
    state_dict = timm_model.state_dict()
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        state_dict[rename_key(key)] = val.squeeze() if "head" in key else val

    # load HuggingFace model
    model = BitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = BitImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Logits:", logits[0, :3])
    print("Predicted class:", model.config.id2label[logits.argmax(-1).item()])

    timm_logits = timm_model(pixel_values)
    assert timm_logits.shape == outputs.logits.shape
    assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model {model_name} and processor to the hub")
        model.push_to_hub(f"ybelkada/{model_name}")
        processor.push_to_hub(f"ybelkada/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
    args = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_table_transformer''': [
'''TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TableTransformerConfig''',
'''TableTransformerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'''TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TableTransformerForObjectDetection''',
'''TableTransformerModel''',
'''TableTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swinv2-tiny-patch4-window8-256": (
        "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
    ),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
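# A quick sanity check of the derived channel dimension: with the defaults
# (embed_dim=96 and four stages), hidden_size is 96 * 2 ** 3 == 768.
#
#   config = Swinv2Config()
#   assert config.hidden_size == 768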
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
        """Compute Kaldi-style log-mel filter bank features from a mono waveform."""
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: bool = True,
        normalize_vars: bool = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        """Utterance-level cepstral mean and variance normalization."""
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=True,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/vocab.txt""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/vocab.txt""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"""
),
"""bert-base-multilingual-cased""": """https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt""",
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"""
),
"""bert-base-german-dbmdz-cased""": """https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt""",
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""bert-base-uncased""": """https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json""",
"""bert-large-uncased""": """https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json""",
"""bert-base-cased""": """https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json""",
"""bert-large-cased""": """https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json""",
"""bert-base-multilingual-uncased""": (
"""https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"""
),
"""bert-base-multilingual-cased""": (
"""https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
"""bert-base-chinese""": """https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json""",
"""bert-base-german-cased""": """https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json""",
"""bert-large-uncased-whole-word-masking""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"""
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
"""https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"""
),
"""bert-base-cased-finetuned-mrpc""": (
"""https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-cased""": (
"""https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"""
),
"""bert-base-german-dbmdz-uncased""": (
"""https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-cased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"""
),
"""TurkuNLP/bert-base-finnish-uncased-v1""": (
"""https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"""
),
"""wietsedv/bert-base-dutch-cased""": (
"""https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""bert-base-uncased""": 512,
"""bert-large-uncased""": 512,
"""bert-base-cased""": 512,
"""bert-large-cased""": 512,
"""bert-base-multilingual-uncased""": 512,
"""bert-base-multilingual-cased""": 512,
"""bert-base-chinese""": 512,
"""bert-base-german-cased""": 512,
"""bert-large-uncased-whole-word-masking""": 512,
"""bert-large-cased-whole-word-masking""": 512,
"""bert-large-uncased-whole-word-masking-finetuned-squad""": 512,
"""bert-large-cased-whole-word-masking-finetuned-squad""": 512,
"""bert-base-cased-finetuned-mrpc""": 512,
"""bert-base-german-dbmdz-cased""": 512,
"""bert-base-german-dbmdz-uncased""": 512,
"""TurkuNLP/bert-base-finnish-cased-v1""": 512,
"""TurkuNLP/bert-base-finnish-uncased-v1""": 512,
"""wietsedv/bert-base-dutch-cased""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""bert-base-uncased""": {"""do_lower_case""": True},
"""bert-large-uncased""": {"""do_lower_case""": True},
"""bert-base-cased""": {"""do_lower_case""": False},
"""bert-large-cased""": {"""do_lower_case""": False},
"""bert-base-multilingual-uncased""": {"""do_lower_case""": True},
"""bert-base-multilingual-cased""": {"""do_lower_case""": False},
"""bert-base-chinese""": {"""do_lower_case""": False},
"""bert-base-german-cased""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking""": {"""do_lower_case""": False},
"""bert-large-uncased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": True},
"""bert-large-cased-whole-word-masking-finetuned-squad""": {"""do_lower_case""": False},
"""bert-base-cased-finetuned-mrpc""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-cased""": {"""do_lower_case""": False},
"""bert-base-german-dbmdz-uncased""": {"""do_lower_case""": True},
"""TurkuNLP/bert-base-finnish-cased-v1""": {"""do_lower_case""": False},
"""TurkuNLP/bert-base-finnish-uncased-v1""": {"""do_lower_case""": True},
"""wietsedv/bert-base-dutch-cased""": {"""do_lower_case""": False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
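# A minimal usage sketch (downloads the checkpoint from the Hub on first use):
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   encoding = tokenizer("hello world")
#   encoding.input_ids  # [101, 7592, 2088, 102] -> [CLS] hello world [SEP]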
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Return the Levenshtein edit distance between word1 and word2."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if the first word is exhausted - delete the rest of the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if the second word is exhausted - delete the rest of the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
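# Worked example: min_distance_up_bottom("kitten", "sitting") == 3
# (substitute k -> s, substitute e -> i, insert g).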
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed
    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        if not self.directed:  # For undirected graphs
            # if both source vertex and destination vertex are both present in the
            # adjacency list, add destination vertex to source vertex list of adjacent
            # vertices and add source vertex to destination vertex list of adjacent
            # vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # if only source vertex is present in adjacency list, add destination vertex
            # to source vertex list of adjacent vertices, then create a new vertex with
            # destination vertex as key and assign a list containing the source vertex
            # as its first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # if only destination vertex is present in adjacency list, add source vertex
            # to destination vertex list of adjacent vertices, then create a new vertex
            # with source vertex as key and assign a list containing the destination
            # vertex as its first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and assign a list
            # containing the destination vertex as its first adjacent vertex; also
            # create a new vertex with destination vertex as key and assign a list
            # containing the source vertex as its first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # if both source vertex and destination vertex are present in adjacency
            # list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # if only source vertex is present in adjacency list, add destination
            # vertex to source vertex list of adjacent vertices and create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # if only destination vertex is present in adjacency list, create a new
            # vertex with source vertex as key and assign a list containing destination
            # vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # if both source vertex and destination vertex are not present in adjacency
            # list, create a new vertex with source vertex as key and a list containing
            # destination vertex as its first adjacent vertex. Then create a new vertex
            # with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
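if __name__ == "__main__":
    # Small self-contained demo of the class above: build a directed graph and
    # print its adjacency list (add_edge returns self, so calls can be chained).
    graph = GraphAdjacencyList[int](directed=True)
    graph.add_edge(1, 2).add_edge(1, 3).add_edge(2, 4)
    print(graph)  # {1: [2, 3], 2: [4], 3: [], 4: []}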
def apply_table(inp, table):
    """Apply the given permutation table to the input bit-string."""
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    """Circular left shift by one position."""
    return data[1:] + data[0]


def xor(a, b):
    """Bitwise XOR of two equal-length bit-strings."""
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    """Look up the S-box output for a 4-bit input (outer bits = row, inner bits = column)."""
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]


def function(expansion, s0, s1, key, message):
    """One round of the S-DES Feistel function (reads p4_table from the __main__ block below)."""
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
a_ = input("""Enter 10 bit key: """)
a_ = input("""Enter 8 bit message: """)
a_ = [6, 3, 7, 4, 8, 5, 10, 9]
a_ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
a_ = [2, 4, 3, 1]
a_ = [2, 6, 3, 1, 4, 8, 5, 7]
a_ = [4, 1, 3, 5, 7, 2, 8, 6]
a_ = [4, 1, 2, 3, 2, 3, 4, 1]
a_ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
a_ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
a_ = apply_table(key, paa_table)
a_ = temp[:5]
a_ = temp[5:]
a_ = left_shift(left)
a_ = left_shift(right)
a_ = apply_table(left + right, pa_table)
a_ = left_shift(left)
a_ = left_shift(right)
a_ = left_shift(left)
a_ = left_shift(right)
a_ = apply_table(left + right, pa_table)
# encryption
a_ = apply_table(message, IP)
a_ = function(expansion, sa, sa, keya, temp)
a_ = temp[4:] + temp[:4]
a_ = function(expansion, sa, sa, keya, temp)
a_ = apply_table(temp, IP_inv)
print("""Cipher text is:""", CT)
# decryption
a_ = apply_table(CT, IP)
a_ = function(expansion, sa, sa, keya, temp)
a_ = temp[4:] + temp[:4]
a_ = function(expansion, sa, sa, keya, temp)
a_ = apply_table(temp, IP_inv)
print("""Plain text after decypting is:""", PT)
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b] (1-indexed), propagating lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True

        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b] (1-indexed)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])
if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(root: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True

        if not isinstance(node, TreeNode):
            return False

        try:
            float(node.data)
        except (TypeError, ValueError):
            return False

        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(root):
        raise ValueError("Each node should be type of TreeNode and data should be float.")

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True

        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(node.right, node.data, right_bound)
        )

    return is_binary_search_tree_recursive_check(root, -float("inf"), float("inf"))
if __name__ == "__main__":
import doctest
doctest.testmod()
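# Example usage (a sketch built from the classes above):
#
#   root = TreeNode(2.0, left=TreeNode(1.0), right=TreeNode(3.0))
#   is_binary_search_tree(root)  # True
#   bad = TreeNode(2.0, left=TreeNode(5.0))
#   is_binary_search_tree(bad)   # False: the left child exceeds its parent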
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num using the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
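# Worked example: prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]
# (4, 6, 8, 9 and 10 are struck out as multiples of 2 and 3).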
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
def get_min_or_max(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"

    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_value < max_value)")
    return min_val if option else max_val


def get_avg(number_1: int, number_2: int) -> int:
    return int((number_1 + number_2) / 2)


def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument value for lower and higher must be(lower > higher)")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")


def main() -> None:
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)


if __name__ == "__main__":
    main()
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
'b0': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1_280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1_408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1_536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1_792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2_048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2_304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2_560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size},
        image_mean=[0.485, 0.456, 0.406],
        image_std=[0.47853944, 0.4732864, 0.47434163],
        do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}

    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
__lowercase = block_name_mapping[b]
rename_keys.append((F'''block{b}_expand_conv/kernel:0''', F'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((F'''block{b}_expand_bn/gamma:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((F'''block{b}_expand_bn/beta:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_expand_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(F'''block{b}_dwconv/depthwise_kernel:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((F'''block{b}_bn/gamma:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((F'''block{b}_bn/beta:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(F'''block{b}_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(F'''block{b}_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((F'''block{b}_se_reduce/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((F'''block{b}_se_reduce/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((F'''block{b}_se_expand/kernel:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((F'''block{b}_se_expand/bias:0''', F'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(F'''block{b}_project_conv/kernel:0''', F'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((F'''block{b}_project_bn/gamma:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((F'''block{b}_project_bn/beta:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_mean:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(F'''block{b}_project_bn/moving_variance:0''', F'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]

    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue

        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)

        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    """Copy/paste/tweak a Keras EfficientNet checkpoint into the HuggingFace structure."""
    original_model = model_classes[model_name](
        include_top=True,
        weights="imagenet",
        input_tensor=None,
        input_shape=None,
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )

    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())

    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()

    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)

    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")

    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()

    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)

    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")

    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(model_name)
        hf_model.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="b0",
        type=str,
        help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="hf_model",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--save_model", action="store_true", help="Save model to local")
    parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")

    args = parser.parse_args()
    convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
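
# Added usage sketch (not part of the original script; the script filename is
# hypothetical):
#   python convert_efficientnet_to_pytorch.py --model_name b0 \
#       --pytorch_dump_folder_path hf_model --save_model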
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factors of n in non-decreasing order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
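

# Added usage sketch (not in the original module): the factors come back in
# non-decreasing order and multiply back to n.
if __name__ == "__main__":
    from math import prod

    assert prime_factors(100) == [2, 2, 5, 5]
    assert prod(prime_factors(100)) == 100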
from __future__ import annotations

from collections.abc import Sequence


def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum sum over all non-empty subsequences of nums."""
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        # Either keep the best so far, extend it with num, or restart at num.
        ans = max(ans, ans + num, num)

    return ans


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input("Enter number of elements : ").strip())
    array = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n]
    print(max_subsequence_sum(array))
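
# Added worked example (not in the original): keeping every positive element is
# optimal for a subsequence, so max_subsequence_sum([1, 2, 3, -2, 5]) == 11.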
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline, T5FilmDecoder
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
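
# Added sketch (not part of the original file) of the guard pattern used above:
#
#     try:
#         if not is_torch_available():
#             raise OptionalDependencyNotAvailable()
#     except OptionalDependencyNotAvailable:
#         from .dummy_objects import *  # stand-ins that raise a helpful error on use
#     else:
#         from .real_module import RealClass  # hypothetical names for illustration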
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return the knight moves from `position` that stay on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]

    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check whether every square of the board has been visited."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Try to extend the tour from `pos`; `curr` is the number of the current move."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0  # backtrack

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, or raise ValueError."""
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
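

# Added usage sketch (not part of the original module): the returned board holds
# the move number at which the knight visits each square.
if __name__ == "__main__":
    assert open_knight_tour(1) == [[1]]  # a 1x1 board is trivially toured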
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird QA module with an extra 5-way classification head on top."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    """Average the start-span, end-span and category cross-entropy losses."""

    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
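

# Added sketch (not in the original script): the one-hot construction in
# cross_entropy is an indexing trick -- for a single example the loss equals
# the negative log-softmax probability at the gold label:
#
#     logits = jnp.array([2.0, 0.5, -1.0])
#     label = 0
#     loss = -jax.nn.log_softmax(logits)[label]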
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
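

# Added sketch (not in the original): _fetch_inputs pads every example to
# max_length with pad_id and masks the padding, e.g. with max_length=6, pad_id=0:
#     ids  [5, 7, 9]  ->  [5, 7, 9, 0, 0, 0]
#     mask [1, 1, 1]  ->  [1, 1, 1, 0, 0, 0]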
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())

    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())

    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))

    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]

    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # Decide per flattened parameter path, not per parameter value.
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
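

# Added sketch (not in the original script): weight_decay_mask keys off the
# flattened parameter path, so decay is skipped for biases and LayerNorm scales:
#     ("encoder", "layer_0", "bias")     -> False  (no decay)
#     ("encoder", "LayerNorm", "scale")  -> False  (no decay)
#     ("encoder", "layer_0", "kernel")   -> True   (decayed)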
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_trajectory_transformer": [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TrajectoryTransformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
        "TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrajectoryTransformerModel",
        "TrajectoryTransformerPreTrainedModel",
        "load_tf_weights_in_trajectory_transformer",
    ]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
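
# Added sketch (not part of the original file): swapping the module object in
# sys.modules for a _LazyModule defers the heavy imports declared above until
# an attribute is first accessed, e.g. (hypothetical usage)
#
#     from <this package> import TrajectoryTransformerConfig
#     # -> _LazyModule resolves "configuration_trajectory_transformer" on demand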
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride

        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
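
        # Added worked example (not in the original test): with the defaults above,
        # frequency_out_dimension = (16 - 2) // 2 + 1 = 8 and
        # time_out_dimension = (24 - 2) // 2 + 1 = 12, so
        # seq_length = 8 * 12 + 2 = 98.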
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True

        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )

    audio, sampling_rate = torchaudio.load(filepath)

    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)

        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
'''input_ids''': [
[1, 2118, 1_1126, 565, 35, 83, 2_5191, 163, 1_8854, 13, 1_2156, 12, 1_6101, 2_5376, 1_3807, 9, 2_2205, 2_7893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 1_1126, 565, 2_4536, 80, 4_3797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 3_3183, 1_1303, 4_3797, 1938, 4, 870, 2_4165, 2_9105, 5, 739, 3_2644, 3_3183, 1_1303, 3_6173, 88, 80, 650, 7821, 4_5940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 1_3171, 31, 5, 1836, 9, 3_2644, 3_3183, 1_1303, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
            expected_decoded_sequence = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class TimeSeriesTransformerConfig(PretrainedConfig):
    model_type = "time_series_transformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        d_model: int = 64,
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
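
# Added worked example (not in the original file): with the defaults above
# (input_size=1, seven lags, no static/dynamic/time features, cardinality=[0]
# hence embedding_dimension=[0]), the per-timestep model input size is
#     feature_size = 1 * 7 + (0 + 0 + 0 + 0 + 1 * 2) = 9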
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
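

# Added sketch (not in the original tests): each scenario entry is a
# (fun, *args) tuple that _run_operation applies to both containers, e.g.
#     _set("key_a", "val_a") == (setitem, "key_a", "val_a")
# so _run_operation(my, *_set("key_a", "val_a")) performs my["key_a"] = "val_a".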
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class AutomaticSpeechRecognition(TaskTemplate):
    task: str = field(default="automatic-speech-recognition", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"transcription": Value("string")})
    audio_column: str = "audio"
    transcription_column: str = "transcription"

    def align_with_features(self, features):
        if self.audio_column not in features:
            raise ValueError(f"Column {self.audio_column} is not present in features.")
        if not isinstance(features[self.audio_column], Audio):
            raise ValueError(f"Column {self.audio_column} is not an Audio type.")
        task_template = copy.deepcopy(self)
        input_schema = self.input_schema.copy()
        input_schema["audio"] = features[self.audio_column]
        task_template.__dict__["input_schema"] = input_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.audio_column: "audio", self.transcription_column: "transcription"}
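

# Added usage sketch (hypothetical values, not in the original file):
#
#     feats = Features({"audio": Audio(sampling_rate=16_000), "transcription": Value("string")})
#     template = AutomaticSpeechRecognition().align_with_features(feats)
#     template.input_schema["audio"].sampling_rate  # -> 16000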
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
def is_balanced(s: str) -> bool:
    """Return True if every bracket in s is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0
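

# Added examples (not in the original): quick sanity checks of the matcher.
#     is_balanced("([]{})")  ->  True
#     is_balanced("([)]")    ->  False   (crossing pair)
#     is_balanced("(")       ->  False   (left unclosed)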
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2, guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1
    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
def _lowerCAmelCase ( self ) -> Union[str, Any]:
snake_case_ : Optional[int] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ : int = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
snake_case_ : Any = self.get_dummy_inputs()
snake_case_ : Optional[Any] = pipe(**_SCREAMING_SNAKE_CASE ).images
snake_case_ : Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : Union[str, Any] = np.array(
[0.697_4782, 0.6890_2093, 0.7013_5885, 0.758_3618, 0.780_4545, 0.785_4912, 0.7866_7426, 0.7874_3863, 0.7807_0223] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _lowerCAmelCase ( self ) -> str:
snake_case_ : List[str] = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
snake_case_ : List[str] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = self.get_dummy_inputs()
snake_case_ : Optional[int] = pipe(**_SCREAMING_SNAKE_CASE ).images
snake_case_ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
snake_case_ : Dict = np.array(
[0.7742_4496, 0.77_3601, 0.764_5288, 0.776_9598, 0.777_2739, 0.773_8688, 0.7818_7233, 0.7787_9584, 0.76_7043] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg")
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler")
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np", )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.5017_3753, 0.5022_3356, 0.50_2039, 0.5023_3036, 0.502_3725, 0.502_2601, 0.501_8758, 0.5023_4085, 0.5024_1566])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
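# Note (my own illustration, not part of the test file): the scheduler tests
# above all follow one pattern — rebuild a compatible scheduler from the
# current scheduler's config and swap it into the pipeline in place:
# pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)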
| 568
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None

logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
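# Quick illustration (my own example, not part of the file): for a sequence
# pair, the methods above produce the [CLS] A [SEP] B [SEP] layout with token
# type ids 0 for the first segment and 1 for the second.
# build_inputs_with_special_tokens([5, 6], [7])      -> [cls_id, 5, 6, sep_id, 7, sep_id]
# create_token_type_ids_from_sequences([5, 6], [7])  -> [0, 0, 0, 0, 1, 1]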
| 568
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc
        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        return self.variance.sqrt()


class ParameterProjection(nn.Module):
    def __init__(self, in_features, args_dim, domain_map, **kwargs):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x):
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)


class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)


class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim=1):
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features) -> nn.Module:
        return ParameterProjection(
            in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map), )

    def domain_map(self, *args):
        raise NotImplementedError()

    @staticmethod
    def squareplus(x):
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0


class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df, loc, scale):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)


class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc, scale):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)


class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count, logits):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args
        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()
        return self._base_distribution((total_count, logits))
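# Quick check (my own example, not part of the module): squareplus maps any
# real input to a strictly positive value, which is why domain_map uses it to
# constrain scale / df / total_count parameters.
# x = torch.tensor([-3.0, 0.0, 3.0])
# (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0  ->  tensor([0.3028, 1.0000, 3.3028])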
| 111
|
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[int | float], int | float], x_start: int | float, x_end: int | float, steps: int = 100, ) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0
    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)
        # Increment step
        x1 = x2
        fx1 = fx2
    return length


if __name__ == "__main__":

    def f(x: int) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
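# Sanity check (my own example): for the straight line f(x) = x from 0 to 1,
# every linear segment lies on the curve, so the sum equals sqrt(2) exactly
# for any step count:
# line_length(lambda x: x, 0, 1, 10)  ->  1.4142135623730951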
| 111
| 1
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations", (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ), )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
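# Illustration (my own example, not part of the tests): each entry above is a
# (callable, *args) tuple, so the same operation script can be replayed against
# both a plain dict and the HashMap under test.
# fun, *args = _set("k", 1)        # -> (setitem, "k", 1)
# _run_operation({}, fun, *args)   # -> (None, None): applied {}["k"] = 1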
| 0
|
encode_dict = {
"""a""": """AAAAA""",
"""b""": """AAAAB""",
"""c""": """AAABA""",
"""d""": """AAABB""",
"""e""": """AABAA""",
"""f""": """AABAB""",
"""g""": """AABBA""",
"""h""": """AABBB""",
"""i""": """ABAAA""",
"""j""": """BBBAA""",
"""k""": """ABAAB""",
"""l""": """ABABA""",
"""m""": """ABABB""",
"""n""": """ABBAA""",
"""o""": """ABBAB""",
"""p""": """ABBBA""",
"""q""": """ABBBB""",
"""r""": """BAAAA""",
"""s""": """BAAAB""",
"""t""": """BAABA""",
"""u""": """BAABB""",
"""v""": """BBBAB""",
"""w""": """BABAA""",
"""x""": """BABAB""",
"""y""": """BABBA""",
"""z""": """BABBB""",
""" """: """ """,
}
decode_dict = {value: key for key, value in encode_dict.items()}
def encode(word: str) -> str:
    """Encodes the given word using the Baconian cipher."""
    encoded = ""
    for letter in word.lower():
        if letter.isalpha() or letter == " ":
            encoded += encode_dict[letter]
        else:
            raise Exception("encode() accepts only letters of the alphabet and spaces")
    return encoded


def decode(coded: str) -> str:
    """Decodes the given Baconian-ciphered text."""
    if set(coded) - {"A", "B", " "} != set():
        raise Exception("decode() accepts only 'A', 'B' and spaces")
    decoded = ""
    for word in coded.split():
        while len(word) != 0:
            decoded += decode_dict[word[:5]]
            word = word[5:]
        decoded += " "
    return decoded.strip()
if __name__ == "__main__":
from doctest import testmod
testmod()
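# Round-trip illustration (my own example): every letter expands to a
# five-symbol A/B group, so decoding consumes the text in chunks of five.
# encode("hi")          -> "AABBBABAAA"
# decode("AABBBABAAA")  -> "hi"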
| 0
| 1
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
"""distilbert""": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"""roberta""": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"""bert""": (BertConfig, BertForMaskedLM, BertTokenizer),
"""gpt2""": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
    """Perform a couple of sanity checks on the arguments."""
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False


def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).", )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only on the [MLM] prediction distribution.", )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
    parser.add_argument(
        "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of passes on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. Default is true.", )
    parser.add_argument(
        "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.", )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O1", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            " See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it.")
            else:
                shutil.rmtree(args.dump_path)
        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher)
    distiller.train()
    logger.info("Let's go get some drinks.")
if __name__ == "__main__":
main()
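# Illustrative invocation (paths and file names are placeholders, not from the
# source); the flags correspond to the parser defined above:
# python train.py --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 0.33 --alpha_mlm 0.33 --alpha_cos 0.33 --alpha_clm 0.0 \
#     --dump_path serialization_dir/my_run --data_file data/binarized_text.pickle \
#     --token_counts data/token_counts.pickle --force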
| 522
|
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_rescale=True, rescale_factor=1 / 255, do_pad=True, pad_size=8, **kwargs, ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image, size, data_format=None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width
        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)

    def preprocess(self, images, do_rescale=None, rescale_factor=None, do_pad=None, pad_size=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs, ):
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_pad:
            images = [self.pad(image, size=pad_size) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
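# Worked example (mine, not from the file): with the default pad_size of 8, a
# 100x130 input is padded symmetrically on the bottom/right up to the next
# multiple of 8:
#   pad_height = (100 // 8 + 1) * 8 - 100 = 4   -> 104 rows
#   pad_width  = (130 // 8 + 1) * 8 - 130 = 6   -> 136 columns
# Note the formula always pads a full extra block when a side is already a
# multiple of 8.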
| 522
| 1
|
from math import sqrt
def solution(limit: int = 1_000_000) -> int:
    num_cuboids: int = 0
    max_cuboid_size: int = 0
    sum_shortest_sides: int
    while num_cuboids <= limit:
        max_cuboid_size += 1
        for sum_shortest_sides in range(2, 2 * max_cuboid_size + 1):
            if sqrt(sum_shortest_sides**2 + max_cuboid_size**2).is_integer():
                num_cuboids += (
                    min(max_cuboid_size, sum_shortest_sides // 2)
                    - max(1, sum_shortest_sides - max_cuboid_size)
                    + 1
                )
    return max_cuboid_size
if __name__ == "__main__":
print(f'''{solution() = }''')
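# Background check (my own example): the integer test above detects cuboids
# whose shortest wall-crossing path sqrt((a + b)**2 + c**2) is an integer.
# For the classic 6 x 5 x 3 cuboid: sqrt((5 + 3)**2 + 6**2) = sqrt(100) = 10.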
| 74
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3_072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1_024
        vision_config.intermediate_size = 4_096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3_072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val
    return orig_state_dict
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)
    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
    # kinetics-400
    if model_name == "xclip-base-patch32":
        expected_probs = torch.tensor([[0.0019, 0.9951, 0.0030]])
    elif model_name == "xclip-base-patch32-16-frames":
        expected_probs = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
    elif model_name == "xclip-base-patch16":
        expected_probs = torch.tensor([[0.0083, 0.9681, 0.0236]])
    elif model_name == "xclip-base-patch16-16-frames":
        expected_probs = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
    elif model_name == "xclip-large-patch14":
        expected_probs = torch.tensor([[0.0062, 0.9864, 0.0075]])
    elif model_name == "xclip-large-patch14-16-frames":
        expected_probs = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
    # kinetics-600
    elif model_name == "xclip-base-patch16-kinetics-600":
        expected_probs = torch.tensor([[0.0555, 0.8914, 0.0531]])
    elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
        expected_probs = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
    elif model_name == "xclip-large-patch14-kinetics-600":
        expected_probs = torch.tensor([[0.0036, 0.9920, 0.0045]])
    # few shot
    elif model_name == "xclip-base-patch16-hmdb-2-shot":
        expected_probs = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
    elif model_name == "xclip-base-patch16-hmdb-4-shot":
        expected_probs = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
    elif model_name == "xclip-base-patch16-hmdb-8-shot":
        expected_probs = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
    elif model_name == "xclip-base-patch16-hmdb-16-shot":
        expected_probs = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
    elif model_name == "xclip-base-patch16-ucf-2-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-4-shot":
        expected_probs = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
    elif model_name == "xclip-base-patch16-ucf-8-shot":
        expected_probs = torch.tensor([[0.0027, 0.9904, 0.0070]])
    elif model_name == "xclip-base-patch16-ucf-16-shot":
        expected_probs = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
    # zero shot
    elif model_name == "xclip-base-patch16-zero-shot":
        expected_probs = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
    else:
        raise ValueError(f"Model name {model_name} not supported")
    assert torch.allclose(probs, expected_probs, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the ๐ค hub."""
)
lowercase_ = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
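# Illustrative invocation (the script name and output path are placeholders,
# not from the source):
# python convert_xclip_checkpoint.py --model_name xclip-base-patch32 \
#     --pytorch_dump_folder_path ./xclip-base-patch32 --push_to_hub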
| 74
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[int] = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm1.weight''', f'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm1.bias''', f'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.weight''', f'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.attn.proj.bias''', f'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.norm2.weight''', f'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.norm2.bias''', f'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.weight''', f'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc1.bias''', f'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(f'''encoder.deit.blocks.{i}.mlp.fc2.weight''', f'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''encoder.deit.blocks.{i}.mlp.fc2.bias''', f'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
("encoder.deit.norm.weight", "encoder.layernorm.weight"),
("encoder.deit.norm.bias", "encoder.layernorm.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg"  # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg"  # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg"
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1_024
        encoder_config.intermediate_size = 4_096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1_024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1_024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50_265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4_502, -4.6_683, -0.5_347, -2.9_291, 9.1_435, -3.0_571, 8.9_764, 1.7_560, 8.7_358, -1.5_311])
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6_437, -1.3_129, -2.2_596, -5.3_455, 6.3_539, 1.7_604, 5.4_991, 1.4_702, 5.6_113, 2.0_170])
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6_816, -5.8_388, 1.1_398, -6.9_034, 6.8_505, -2.4_393, 1.2_284, -1.0_232, -1.9_661, -3.9_210])
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0_162, -7.0_959, 4.4_155, -5.1_063, 7.0_468, -3.1_631, 2.6_466, -0.3_081, -0.8_106, -1.7_535])

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
a : Optional[int] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
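# --- Hedged illustration (added; not part of the conversion script above) ---
# The q/k/v split at the top of this file slices a fused in_proj_weight into
# query, key and value matrices by thirds of the row dimension. A minimal,
# self-contained sketch of that slicing, using a toy hidden size of 4 (an
# assumption for illustration, not the real TrOCR dimensions):
import torch

hidden_size = 4
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)  # q, k, v stacked row-wise
q_weight = in_proj_weight[:hidden_size, :]                   # first third -> queries
k_weight = in_proj_weight[hidden_size : 2 * hidden_size, :]  # middle third -> keys
v_weight = in_proj_weight[-hidden_size:, :]                  # last third -> values
assert torch.equal(torch.cat([q_weight, k_weight, v_weight], dim=0), in_proj_weight)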
| 716
|
"""simple docstring"""
def lowercase__(A ) ->bool:
"""simple docstring"""
lowercase__ : Tuple= (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def lowercase__(A = 5_000 ) ->int:
"""simple docstring"""
lowercase__ : str= [(i * (3 * i - 1)) // 2 for i in range(1 , A )]
for i, pentagonal_i in enumerate(A ):
for j in range(A , len(A ) ):
lowercase__ : List[Any]= pentagonal_nums[j]
lowercase__ : int= pentagonal_i + pentagonal_j
lowercase__ : Optional[int]= pentagonal_j - pentagonal_i
if is_pentagonal(A ) and is_pentagonal(A ):
return b
return -1
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85
| 0
|
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
_SCREAMING_SNAKE_CASE : Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(__lowerCamelCase )
class UpperCamelCase__ ( __lowerCamelCase ):
def __init__( self : Optional[Any], *__lowerCamelCase : Union[str, Any], **__lowerCamelCase : Union[str, Any] ) -> List[Any]:
super().__init__(*__a, **__a )
requires_backends(self, '''vision''' )
self.check_model_type(__a )
def __call__( self : Dict, __lowerCamelCase : Union[str, List[str], "Image.Image", List["Image.Image"]], **__lowerCamelCase : str ) -> Optional[int]:
return super().__call__(__a, **__a )
def __lowercase( self : Any, **__lowerCamelCase : Union[str, Any] ) -> Dict:
return {}, {}, {}
def __lowercase( self : List[str], __lowerCamelCase : Union[str, Any] ) -> List[str]:
UpperCamelCase__ : List[str] = load_image(__a )
UpperCamelCase__ : Tuple = image.size
UpperCamelCase__ : List[str] = self.image_processor(images=__a, return_tensors=self.framework )
return model_inputs
def __lowercase( self : Optional[int], __lowerCamelCase : Optional[int] ) -> Optional[int]:
UpperCamelCase__ : Optional[Any] = self.model(**__a )
return model_outputs
def __lowercase( self : List[str], __lowerCamelCase : int ) -> List[Any]:
UpperCamelCase__ : Dict = model_outputs.predicted_depth
UpperCamelCase__ : Dict = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ), size=self.image_size[::-1], mode='''bicubic''', align_corners=__a )
UpperCamelCase__ : Tuple = prediction.squeeze().cpu().numpy()
UpperCamelCase__ : Tuple = (output * 2_55 / np.max(__a )).astype('''uint8''' )
UpperCamelCase__ : Optional[Any] = Image.fromarray(__a )
UpperCamelCase__ : List[str] = {}
UpperCamelCase__ : int = predicted_depth
UpperCamelCase__ : List[str] = depth
return output_dict
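# --- Hedged usage sketch (added; not part of the pipeline implementation) ---
# How the depth-estimation pipeline above is typically consumed. The model id
# and image URL are assumptions for illustration; any depth-estimation
# checkpoint works. The output keys match the dict built in postprocess():
from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth_map.png")   # PIL image assembled in postprocess()
print(result["predicted_depth"].shape)  # raw tensor kept alongside the image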
| 344
|
'''simple docstring'''
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class A ( pl.LightningModule ):
def __init__( self : Dict , __a : List[str] ) -> Tuple:
super().__init__()
__UpperCAmelCase = model
__UpperCAmelCase = 2
__UpperCAmelCase = nn.Linear(self.model.config.hidden_size , self.num_labels )
def snake_case__ ( self : int ) -> int:
pass
def lowerCAmelCase ( UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
# load longformer model from model identifier
__UpperCAmelCase = LongformerModel.from_pretrained(UpperCamelCase__ )
__UpperCAmelCase = LightningModel(UpperCamelCase__ )
__UpperCAmelCase = torch.load(UpperCamelCase__ , map_location=torch.device('''cpu''' ) )
lightning_model.load_state_dict(ckpt['''state_dict'''] )
# init longformer question answering model
__UpperCAmelCase = LongformerForQuestionAnswering.from_pretrained(UpperCamelCase__ )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(UpperCamelCase__ )
print(f"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
__lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--longformer_model",
default=None,
type=str,
required=True,
help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
)
parser.add_argument(
"--longformer_question_answering_ckpt_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch Lightning Checkpoint.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__lowerCAmelCase : List[str] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
)
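# --- Hedged usage sketch (added) ---
# How the conversion above is typically invoked from the shell. The script
# filename and checkpoint path are assumptions for illustration; the model
# identifier must be one of the two values named in the --longformer_model help:
#
#   python convert_longformer_original_pytorch_lightning_to_pytorch.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./qa_checkpoint.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa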
| 262
| 0
|
import gc
import threading
import time
import psutil
import torch
class _UpperCAmelCase :
def __init__( self : str):
SCREAMING_SNAKE_CASE_ :Tuple = psutil.Process()
SCREAMING_SNAKE_CASE_ :int = False
def _snake_case ( self : Dict):
SCREAMING_SNAKE_CASE_ :Dict = -1
while True:
SCREAMING_SNAKE_CASE_ :Optional[int] = max(self.process.memory_info().rss , self.cpu_memory_peak)
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _snake_case ( self : Tuple):
SCREAMING_SNAKE_CASE_ :int = True
SCREAMING_SNAKE_CASE_ :Dict = threading.Thread(target=self.peak_monitor)
SCREAMING_SNAKE_CASE_ :int = True
self.thread.start()
def _snake_case ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = False
self.thread.join()
return self.cpu_memory_peak
SCREAMING_SNAKE_CASE__ = PeakCPUMemory()
def lowercase ( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :List[Any] = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE_ :str = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
SCREAMING_SNAKE_CASE_ :Any = torch.cuda.memory_allocated(a )
torch.cuda.reset_peak_memory_stats()
return measures
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
SCREAMING_SNAKE_CASE_ :Tuple = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
SCREAMING_SNAKE_CASE_ :List[Any] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
SCREAMING_SNAKE_CASE_ :Union[str, Any] = (torch.cuda.memory_allocated(a ) - start_measures[str(a )]) / 2**20
SCREAMING_SNAKE_CASE_ :Union[str, Any] = (torch.cuda.max_memory_allocated(a ) - start_measures[str(a )]) / 2**20
return measures
def lowercase ( a , a ):
'''simple docstring'''
print(F"{description}:" )
print(F"- Time: {measures['time']:.2f}s" )
for i in range(torch.cuda.device_count() ):
print(F"- GPU {i} allocated: {measures[str(a )]:.2f}MiB" )
SCREAMING_SNAKE_CASE_ :List[str] = measures[F"{i}-peak"]
print(F"- GPU {i} peak: {peak:.2f}MiB" )
print(F"- CPU RAM allocated: {measures['cpu']:.2f}MiB" )
print(F"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB" )
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE__ = {
"configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["AlbertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["AlbertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"AlbertForMaskedLM",
"AlbertForMultipleChoice",
"AlbertForPreTraining",
"AlbertForQuestionAnswering",
"AlbertForSequenceClassification",
"AlbertForTokenClassification",
"AlbertModel",
"AlbertPreTrainedModel",
"load_tf_weights_in_albert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAlbertForMaskedLM",
"TFAlbertForMultipleChoice",
"TFAlbertForPreTraining",
"TFAlbertForQuestionAnswering",
"TFAlbertForSequenceClassification",
"TFAlbertForTokenClassification",
"TFAlbertMainLayer",
"TFAlbertModel",
"TFAlbertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = [
"FlaxAlbertForMaskedLM",
"FlaxAlbertForMultipleChoice",
"FlaxAlbertForPreTraining",
"FlaxAlbertForQuestionAnswering",
"FlaxAlbertForSequenceClassification",
"FlaxAlbertForTokenClassification",
"FlaxAlbertModel",
"FlaxAlbertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
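# --- Hedged illustration (added; not transformers' implementation) ---
# _LazyModule defers the heavy framework imports above until a symbol is first
# accessed. The same deferral can be sketched with PEP 562's module-level
# __getattr__; the submodule map below is a simplified stand-in and assumes
# this file lives inside a package (so the relative import resolves):
import importlib

_LAZY_ATTRS = {"AlbertConfig": "configuration_albert"}  # symbol -> submodule

def __getattr__(name):
    if name in _LAZY_ATTRS:
        submodule = importlib.import_module("." + _LAZY_ATTRS[name], __name__)
        return getattr(submodule, name)  # imported only on first access
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")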
| 140
| 0
|
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def _UpperCamelCase ( snake_case__ ) -> None:
__UpperCAmelCase , __UpperCAmelCase : List[str] = analyze_text(_lowerCamelCase )
__UpperCAmelCase : str = list(" " + ascii_lowercase )
    # total count of single characters, used to normalize counts into probabilities.
    __UpperCAmelCase : Optional[int] = sum(single_char_strings.values() )
    # entropy accumulator for length-one strings
    __UpperCAmelCase : int = 0
    # for each letter of the alphabet, if it occurs in the text, add its entropy contribution
for ch in my_alphas:
if ch in single_char_strings:
__UpperCAmelCase : Optional[Any] = single_char_strings[ch]
__UpperCAmelCase : int = my_str / all_sum
            my_fir_sum += prob * math.log2(_lowerCamelCase )  # entropy formula.
# print entropy
print(f'''{round(-1 * my_fir_sum ):.1f}''' )
    # total count of two-character sequences
    __UpperCAmelCase : str = sum(two_char_strings.values() )
    __UpperCAmelCase : int = 0
    # for each two-letter sequence over the alphabet, accumulate its entropy contribution.
    for cha in my_alphas:
        for chb in my_alphas:
            __UpperCAmelCase : Any = cha + chb
if sequence in two_char_strings:
__UpperCAmelCase : Union[str, Any] = two_char_strings[sequence]
__UpperCAmelCase : Optional[int] = int(_lowerCamelCase ) / all_sum
                my_sec_sum += prob * math.log2(_lowerCamelCase )
# print second entropy
print(f'''{round(-1 * my_sec_sum ):.1f}''' )
# print the difference between them
print(f'''{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}''' )
def _UpperCamelCase ( snake_case__ ) -> tuple[dict, dict]:
__UpperCAmelCase : Tuple = Counter() # type: ignore
__UpperCAmelCase : Any = Counter() # type: ignore
single_char_strings[text[-1]] += 1
# first case when we have space at start.
two_char_strings[" " + text[0]] += 1
for i in range(0, len(_lowerCamelCase ) - 1 ):
single_char_strings[text[i]] += 1
two_char_strings[text[i : i + 2]] += 1
return single_char_strings, two_char_strings
def _UpperCamelCase ( ) -> Tuple:
import doctest
doctest.testmod()
# text = (
# "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
# "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
# "jointure saw horrible. He private he on be imagine suppose. Fertile "
# "beloved evident through no service elderly is. Blind there if every no so "
# "at. Own neglected you preferred way sincerity delivered his attempted. To "
# "of message cottage windows do besides against uncivil. Delightful "
# "unreserved impossible few estimating men favourable see entreaties. She "
# "propriety immediate was improving. He or entrance humoured likewise "
# "moderate. Much nor game son say feel. Fat make met can must form into "
# "gate. Me we offending prevailed discovery. "
# )
# calculate_prob(text)
if __name__ == "__main__":
main()
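# --- Hedged worked example (added; not part of the original module) ---
# Shannon entropy H = -sum(p * log2(p)) over symbol probabilities. A tiny
# self-contained check on the string "aab": p(a) = 2/3, p(b) = 1/3, so
# H = -(2/3)*log2(2/3) - (1/3)*log2(1/3) ~= 0.918 bits.
import math
from collections import Counter

counts = Counter("aab")
total = sum(counts.values())
entropy = -sum((c / total) * math.log2(c / total) for c in counts.values())
assert abs(entropy - 0.9183) < 1e-3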
| 382
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE : List[str] = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
_SCREAMING_SNAKE_CASE : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 549
| 0
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': (
'''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json'''
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __SCREAMING_SNAKE_CASE ( snake_case__):
"""simple docstring"""
__UpperCAmelCase = "trajectory_transformer"
__UpperCAmelCase = ["past_key_values"]
__UpperCAmelCase = {
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , _UpperCAmelCase=100 , _UpperCAmelCase=5 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=249 , _UpperCAmelCase=6 , _UpperCAmelCase=17 , _UpperCAmelCase=25 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=128 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0006 , _UpperCAmelCase=512 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1E-12 , _UpperCAmelCase=1 , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=50_256 , _UpperCAmelCase=50_256 , **_UpperCAmelCase , ):
__snake_case : Optional[Any] = vocab_size
__snake_case : Optional[Any] = action_weight
__snake_case : Any = reward_weight
__snake_case : List[Any] = value_weight
__snake_case : str = max_position_embeddings
__snake_case : Optional[int] = block_size
__snake_case : Tuple = action_dim
__snake_case : Dict = observation_dim
__snake_case : Union[str, Any] = transition_dim
__snake_case : List[str] = learning_rate
__snake_case : List[Any] = n_layer
__snake_case : Dict = n_head
__snake_case : List[Any] = n_embd
__snake_case : Dict = embd_pdrop
__snake_case : int = attn_pdrop
__snake_case : Dict = resid_pdrop
__snake_case : int = initializer_range
__snake_case : Dict = layer_norm_eps
__snake_case : List[Any] = kaiming_initializer_range
__snake_case : List[Any] = use_cache
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
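# --- Hedged usage sketch (added; assumes the class is still importable from
# transformers under its public name) ---
# attribute_map above lets standard config names resolve to this model's own
# fields: reading config.hidden_size transparently returns n_embd.
from transformers import TrajectoryTransformerConfig

config = TrajectoryTransformerConfig(n_embd=128, n_layer=4, n_head=4)
assert config.hidden_size == 128      # mapped to n_embd via attribute_map
assert config.num_hidden_layers == 4  # mapped to n_layer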
| 704
|
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = '''โ'''
__magic_name__ = {
'''vocab_file''': '''vocab.json''',
'''spm_file''': '''sentencepiece.bpe.model''',
}
__magic_name__ = {
'''vocab_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json'''
),
},
'''spm_file''': {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model'''
)
},
}
__magic_name__ = {
'''facebook/s2t-small-librispeech-asr''': 1_024,
}
__magic_name__ = ['''pt''', '''fr''', '''ru''', '''nl''', '''ro''', '''it''', '''es''', '''de''']
__magic_name__ = {'''mustc''': MUSTC_LANGS}
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = VOCAB_FILES_NAMES
__UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__UpperCAmelCase = MAX_MODEL_INPUT_SIZES
__UpperCAmelCase = ["input_ids", "attention_mask"]
__UpperCAmelCase = []
def __init__( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase="<s>" , _UpperCAmelCase="</s>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__snake_case : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , do_upper_case=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , tgt_lang=_UpperCAmelCase , lang_codes=_UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **_UpperCAmelCase , )
__snake_case : Dict = do_upper_case
__snake_case : Optional[Any] = do_lower_case
__snake_case : List[Any] = load_json(_UpperCAmelCase )
__snake_case : Dict = {v: k for k, v in self.encoder.items()}
__snake_case : Optional[Any] = spm_file
__snake_case : Any = load_spm(_UpperCAmelCase , self.sp_model_kwargs )
if lang_codes is not None:
__snake_case : Optional[Any] = lang_codes
__snake_case : int = LANGUAGES[lang_codes]
__snake_case : str = [F"""<lang:{lang}>""" for lang in self.langs]
__snake_case : Dict = {lang: self.sp_model.PieceToId(F"""<lang:{lang}>""" ) for lang in self.langs}
__snake_case : Dict = self.lang_tokens
__snake_case : str = tgt_lang if tgt_lang is not None else self.langs[0]
self.set_tgt_lang_special_tokens(self._tgt_lang )
else:
__snake_case : Optional[int] = {}
@property
def lowercase_ ( self ):
return len(self.encoder )
@property
def lowercase_ ( self ):
return self._tgt_lang
@tgt_lang.setter
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = new_tgt_lang
self.set_tgt_lang_special_tokens(_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : Tuple = self.lang_code_to_id[tgt_lang]
__snake_case : Optional[Any] = [lang_code_id]
def lowercase_ ( self , _UpperCAmelCase ):
return self.sp_model.encode(_UpperCAmelCase , out_type=_UpperCAmelCase )
def lowercase_ ( self , _UpperCAmelCase ):
return self.encoder.get(_UpperCAmelCase , self.encoder[self.unk_token] )
def lowercase_ ( self , _UpperCAmelCase ):
return self.decoder.get(_UpperCAmelCase , self.unk_token )
def lowercase_ ( self , _UpperCAmelCase ):
__snake_case : str = []
__snake_case : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
__snake_case : Dict = self.sp_model.decode(_UpperCAmelCase )
out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
__snake_case : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
__snake_case : Union[str, Any] = self.sp_model.decode(_UpperCAmelCase )
out_string += decoded.upper() if self.do_upper_case else decoded
return out_string.strip()
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase=None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase , token_ids_a=_UpperCAmelCase , already_has_special_tokens=_UpperCAmelCase )
__snake_case : Union[str, Any] = [1] * len(self.prefix_tokens )
__snake_case : Optional[Any] = [1]
if token_ids_a is None:
return prefix_ones + ([0] * len(_UpperCAmelCase )) + suffix_ones
return prefix_ones + ([0] * len(_UpperCAmelCase )) + ([0] * len(_UpperCAmelCase )) + suffix_ones
def lowercase_ ( self ):
__snake_case : List[Any] = self.encoder.copy()
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
__snake_case : int = self.__dict__.copy()
__snake_case : str = None
return state
def __setstate__( self , _UpperCAmelCase ):
__snake_case : List[Any] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
__snake_case : Optional[int] = {}
__snake_case : int = load_spm(self.spm_file , self.sp_model_kwargs )
def lowercase_ ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__snake_case : str = Path(_UpperCAmelCase )
assert save_dir.is_dir(), F"""{save_directory} should be a directory"""
__snake_case : int = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
__snake_case : Union[str, Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , _UpperCAmelCase )
if os.path.abspath(self.spm_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , _UpperCAmelCase )
elif not os.path.isfile(self.spm_file ):
with open(_UpperCAmelCase , 'wb' ) as fi:
__snake_case : List[str] = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (str(_UpperCAmelCase ), str(_UpperCAmelCase ))
def UpperCAmelCase__( __UpperCAmelCase : str , __UpperCAmelCase : Dict[str, Any] ):
__snake_case : List[str] = sentencepiece.SentencePieceProcessor(**__UpperCAmelCase )
spm.Load(str(__UpperCAmelCase ) )
return spm
def UpperCAmelCase__( __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'r' ) as f:
return json.load(__UpperCAmelCase )
def UpperCAmelCase__( __UpperCAmelCase : List[Any] , __UpperCAmelCase : str ):
with open(__UpperCAmelCase , 'w' ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase , indent=2 )
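# --- Hedged usage sketch (added; not part of the tokenizer module) ---
# A typical round trip through the tokenizer defined above; the checkpoint id
# matches the pretrained vocab map earlier in this file:
from transformers import Speech2TextTokenizer

tokenizer = Speech2TextTokenizer.from_pretrained("facebook/s2t-small-librispeech-asr")
ids = tokenizer("hello world").input_ids  # sentencepiece pieces -> vocab ids + eos
print(tokenizer.decode(ids, skip_special_tokens=True))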
| 679
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE : List[Any] = 10
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2, 3, 4]
SCREAMING_SNAKE_CASE : int = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowercase__ , self.block_size , 0 ) , lowercase__ )
def _UpperCamelCase ( self ) -> Dict:
SCREAMING_SNAKE_CASE : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase__ , self.block_size , 0 ) , lowercase__ )
def _UpperCamelCase ( self ) -> Tuple:
SCREAMING_SNAKE_CASE : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
SCREAMING_SNAKE_CASE : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowercase__ , self.block_size , 0 ) , lowercase__ )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Optional[Any] = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = process_story(lowercase__ )
self.assertEqual(lowercase__ , [] )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Dict = ''
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = process_story(lowercase__ )
self.assertEqual(lowercase__ , [] )
self.assertEqual(lowercase__ , [] )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Dict = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = process_story(lowercase__ )
SCREAMING_SNAKE_CASE : int = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE : Any = ['It was the best of times.']
self.assertEqual(lowercase__ , lowercase__ )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE : Tuple = torch.tensor([1, 2, 3, 4] )
SCREAMING_SNAKE_CASE : int = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowercase__ , 0 ).numpy() , expected.numpy() )
def _UpperCamelCase ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE : Dict = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase__ , 23 ).numpy() , expected.numpy() )
def _UpperCamelCase ( self ) -> str:
SCREAMING_SNAKE_CASE : List[str] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowercase__ , 1 ).numpy() , expected.numpy() )
def _UpperCamelCase ( self ) -> int:
SCREAMING_SNAKE_CASE : str = 101
SCREAMING_SNAKE_CASE : Dict = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
SCREAMING_SNAKE_CASE : Tuple = compute_token_type_ids(lowercase__ , lowercase__ )
np.testing.assert_array_equal(lowercase__ , lowercase__ )
| 251
|
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def __lowerCAmelCase ( a_ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class UpperCAmelCase ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def _UpperCamelCase ( lowercase__ ) -> Dict:
SCREAMING_SNAKE_CASE : Optional[Any] = parser.add_parser('env' )
download_parser.set_defaults(func=lowercase__ )
def _UpperCamelCase ( self ) -> List[str]:
SCREAMING_SNAKE_CASE : Any = huggingface_hub.__version__
SCREAMING_SNAKE_CASE : List[str] = 'not installed'
SCREAMING_SNAKE_CASE : List[Any] = 'NA'
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE : str = torch.__version__
SCREAMING_SNAKE_CASE : Optional[int] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE : str = 'not installed'
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE : Any = transformers.__version__
SCREAMING_SNAKE_CASE : int = 'not installed'
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE : Optional[Any] = accelerate.__version__
SCREAMING_SNAKE_CASE : Any = 'not installed'
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE : Union[str, Any] = xformers.__version__
SCREAMING_SNAKE_CASE : Optional[int] = {
'`diffusers` version': version,
'Platform': platform.platform(),
'Python version': platform.python_version(),
'PyTorch version (GPU?)': F"""{pt_version} ({pt_cuda_available})""",
'Huggingface_hub version': hub_version,
'Transformers version': transformers_version,
'Accelerate version': accelerate_version,
'xFormers version': xformers_version,
'Using GPU in script?': '<fill in>',
'Using distributed or parallel set-up in script?': '<fill in>',
}
print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
print(self.format_dict(lowercase__ ) )
return info
@staticmethod
def _UpperCamelCase ( lowercase__ ) -> Dict:
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 251
| 1
|
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
TaConfig,
TaTokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __UpperCAmelCase ( ) -> str:
"""simple docstring"""
_a : str = '''https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg'''
_a : Tuple = Image.open(requests.get(__a ,stream=__a ).raw ).convert('''RGB''' )
return image
def __UpperCAmelCase ( __a : Tuple ) -> List[str]:
"""simple docstring"""
_a : Tuple = []
# fmt: off
# vision encoder
rename_keys.append(('''visual_encoder.cls_token''', '''vision_model.embeddings.class_embedding''') )
rename_keys.append(('''visual_encoder.pos_embed''', '''vision_model.embeddings.position_embedding''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.weight''', '''vision_model.embeddings.patch_embedding.weight''') )
rename_keys.append(('''visual_encoder.patch_embed.proj.bias''', '''vision_model.embeddings.patch_embedding.bias''') )
rename_keys.append(('''ln_vision.weight''', '''vision_model.post_layernorm.weight''') )
rename_keys.append(('''ln_vision.bias''', '''vision_model.post_layernorm.bias''') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.weight''', '''qformer.embeddings.layernorm.weight''') )
rename_keys.append(('''Qformer.bert.embeddings.LayerNorm.bias''', '''qformer.embeddings.layernorm.bias''') )
# fmt: on
return rename_keys
def __UpperCAmelCase ( __a : Dict ,__a : List[Any] ,__a : Any ) -> Dict:
"""simple docstring"""
_a : str = dct.pop(__a )
_a : Tuple = val
def __UpperCAmelCase ( __a : Union[str, Any] ,__a : Dict ) -> Optional[int]:
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_a : List[Any] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.q_bias""" )
_a : List[str] = state_dict.pop(F"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
_a : Optional[Any] = torch.cat((q_bias, torch.zeros_like(__a ,requires_grad=__a ), v_bias) )
_a : Optional[int] = qkv_bias
def __UpperCAmelCase ( __a : Tuple ) -> List[Any]:
"""simple docstring"""
_a : int = 364 if '''coco''' in model_name else 224
_a : str = InstructBlipVisionConfig(image_size=__a ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "t5-xl" in model_name:
_a : Optional[Any] = TaConfig.from_pretrained('''google/flan-t5-xl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_a : str = TaConfig.from_pretrained('''google/flan-t5-xxl''' ,dense_act_fn='''gelu''' ,bos_token_id=1 ).to_dict()
elif "vicuna-7b" in model_name:
_a : int = LlamaConfig.from_pretrained('''decapoda-research/llama-7b-hf''' ,vocab_size=32_001 ).to_dict()
elif "vicuna-13b" in model_name:
_a : int = LlamaConfig.from_pretrained('''decapoda-research/llama-13b-hf''' ,vocab_size=32_001 ).to_dict()
else:
raise ValueError('''Model name not supported''' )
# the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
_a : List[str] = InstructBlipQFormerConfig(vocab_size=30_523 ).to_dict()
_a : List[Any] = InstructBlipConfig(vision_config=__a ,text_config=__a ,qformer_config=__a )
return config, image_size
@torch.no_grad()
def __UpperCAmelCase ( __a : Any ,__a : Tuple=None ,__a : List[str]=False ) -> str:
"""simple docstring"""
_a : List[Any] = AutoTokenizer.from_pretrained('''bert-base-uncased''' ,truncation_side='''left''' )
qformer_tokenizer.add_special_tokens({'''bos_token''': '''[DEC]'''} )
if "t5" in model_name:
_a : Any = TaTokenizerFast.from_pretrained('''google/flan-t5-xl''' ,truncation_side='''left''' )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
_a : Optional[Any] = LlamaTokenizerFast.from_pretrained(
'''huggyllama/llama-7b''' ,truncation_side='''left''' ,bos_token='''</s>''' ,unk_token='''</s>''' )
tokenizer.add_special_tokens({'''pad_token''': '''[PAD]'''} )
_a : str = get_blipa_config(__a )
_a : Optional[int] = InstructBlipForConditionalGeneration(__a ).eval()
_a : Optional[int] = {
'''instructblip-vicuna-7b''': ('''blip2_vicuna_instruct''', '''vicuna7b'''),
'''instructblip-vicuna-13b''': ('''blip2_vicuna_instruct''', '''vicuna13b'''),
'''instructblip-flan-t5-xl''': ('''blip2_t5_instruct''', '''flant5xl'''),
'''instructblip-flan-t5-xxl''': ('''blip2_t5_instruct''', '''flant5xxl'''),
}
_a : Optional[int] = model_name_to_original[model_name]
# load original model
print('''Loading original model...''' )
_a : List[str] = '''cuda:1''' if torch.cuda.is_available() else '''cpu'''
_a : Union[str, Any] = '''cuda:2''' if torch.cuda.is_available() else '''cpu'''
_a : Any = load_model_and_preprocess(
name=__a ,model_type=__a ,is_eval=__a ,device=__a )
original_model.eval()
print('''Done!''' )
# update state dict keys
_a : Any = original_model.state_dict()
_a : Optional[Any] = create_rename_keys(__a )
for src, dest in rename_keys:
rename_key(__a ,__a ,__a )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_a : str = state_dict.pop(__a )
if key.startswith('''Qformer.bert''' ):
_a : List[Any] = key.replace('''Qformer.bert''' ,'''qformer''' )
if "attention.self" in key:
_a : Tuple = key.replace('''self''' ,'''attention''' )
if "llm_proj" in key:
_a : Union[str, Any] = key.replace('''llm_proj''' ,'''language_projection''' )
if "t5_proj" in key:
_a : int = key.replace('''t5_proj''' ,'''language_projection''' )
if key.startswith('''llm_model''' ):
_a : List[str] = key.replace('''llm_model''' ,'''language_model''' )
if key.startswith('''t5''' ):
_a : List[str] = key.replace('''t5''' ,'''language''' )
_a : Union[str, Any] = val
# read in qv biases
read_in_q_v_bias(__a ,__a )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(__a ,strict=__a )
_a : Optional[Any] = load_demo_image()
_a : Union[str, Any] = '''What is unusual about this image?'''
# create processor
_a : Tuple = BlipImageProcessor(
size={'''height''': image_size, '''width''': image_size} ,image_mean=__a ,image_std=__a )
_a : Dict = InstructBlipProcessor(
image_processor=__a ,tokenizer=__a ,qformer_tokenizer=__a ,)
_a : str = processor(images=__a ,text=__a ,return_tensors='''pt''' ).to(__a )
# make sure processor creates exact same pixel values
_a : Optional[Any] = vis_processors['''eval'''](__a ).unsqueeze(0 ).to(__a )
_a : List[Any] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) ,__a )
original_model.to(__a )
hf_model.to(__a )
with torch.no_grad():
if "vicuna" in model_name:
_a : Any = original_model({'''image''': original_pixel_values, '''text_input''': [prompt]} ).logits
_a : Optional[int] = hf_model(**__a ).logits
else:
_a : Optional[int] = original_model(
{'''image''': original_pixel_values, '''text_input''': [prompt], '''text_output''': ['''\n''']} ).logits
_a : Optional[Any] = tokenizer('''\n''' ,return_tensors='''pt''' ).input_ids.to(__a )
_a : int = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id ,-100 )
_a : int = hf_model(**__a ,labels=__a ).logits
print('''First values of original logits:''' ,original_logits[0, :3, :3] )
print('''First values of HF logits:''' ,logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
_a : Optional[Any] = 1E-4 if '''vicuna''' in model_name else 1E-5
assert torch.allclose(original_logits.to(logits.device ) ,__a ,atol=__a )
print('''Looks ok!''' )
print('''Generating with original model...''' )
_a : Union[str, Any] = original_model.generate({'''image''': original_pixel_values, '''prompt''': prompt} ,num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print('''Generating with HF model...''' )
_a : Optional[int] = hf_model.generate(
**__a ,do_sample=__a ,num_beams=5 ,max_length=256 ,min_length=1 ,top_p=0.9 ,repetition_penalty=1.5 ,length_penalty=1.0 ,temperature=1 ,)
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
_a : Optional[Any] = 2
print('''Original generation:''' ,__a )
_a : Dict = processor.batch_decode(__a ,skip_special_tokens=__a )
_a : Optional[int] = [text.strip() for text in output_text]
print('''HF generation:''' ,__a )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__a )
hf_model.save_pretrained(__a )
if push_to_hub:
processor.push_to_hub(F"""Salesforce/{model_name}""" )
hf_model.push_to_hub(F"""Salesforce/{model_name}""" )
if __name__ == "__main__":
a__ = argparse.ArgumentParser()
a__ = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
a__ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
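# --- Hedged illustration (added; not part of the conversion script) ---
# The q/v bias helper above rebuilds a fused qkv bias from separate q and v
# biases: this ViT variant learns no key bias, so a zero block is spliced in
# between. A toy-sized sketch of that concatenation (hidden size 4 is an
# assumption for illustration):
import torch

hidden_size = 4
q_bias = torch.randn(hidden_size)
v_bias = torch.randn(hidden_size)
qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
assert qkv_bias.shape == (3 * hidden_size,)
assert torch.all(qkv_bias[hidden_size : 2 * hidden_size] == 0)  # key bias stays zero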
| 707
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
a__ = logging.get_logger(__name__)
a__ = Dict[str, Any]
a__ = List[Prediction]
@add_end_docstrings(__lowercase )
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self , *_a , **_a ) -> Optional[Any]:
super().__init__(*_a , **_a )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , '''vision''' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __lowercase ( self , **_a ) -> int:
_a : List[str] = {}
if "threshold" in kwargs:
_a : List[str] = kwargs['''threshold''']
return {}, {}, postprocess_kwargs
def __call__( self , *_a , **_a ) -> Union[Predictions, List[Prediction]]:
return super().__call__(*_a , **_a )
def __lowercase ( self , _a ) -> Any:
_a : Optional[int] = load_image(_a )
_a : str = torch.IntTensor([[image.height, image.width]] )
_a : Optional[Any] = self.image_processor(images=[image] , return_tensors='''pt''' )
if self.tokenizer is not None:
_a : Union[str, Any] = self.tokenizer(text=inputs['''words'''] , boxes=inputs['''boxes'''] , return_tensors='''pt''' )
_a : Optional[int] = target_size
return inputs
def __lowercase ( self , _a ) -> Optional[Any]:
_a : str = model_inputs.pop('''target_size''' )
_a : Dict = self.model(**_a )
_a : List[Any] = outputs.__class__({'''target_size''': target_size, **outputs} )
if self.tokenizer is not None:
_a : int = model_inputs['''bbox''']
return model_outputs
def __lowercase ( self , _a , _a=0.9 ) -> Any:
_a : int = model_outputs['''target_size''']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
_a , _a : Any = target_size[0].tolist()
def unnormalize(_a ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1_0_0_0),
(height * bbox[1] / 1_0_0_0),
(width * bbox[2] / 1_0_0_0),
(height * bbox[3] / 1_0_0_0),
] ) )
_a , _a : Tuple = model_outputs['''logits'''].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
_a : int = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
_a : Optional[Any] = [unnormalize(_a ) for bbox in model_outputs['''bbox'''].squeeze(0 )]
_a : Dict = ['''score''', '''label''', '''box''']
_a : Optional[int] = [dict(zip(_a , _a ) ) for vals in zip(scores.tolist() , _a , _a ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
_a : List[str] = self.image_processor.post_process_object_detection(_a , _a , _a )
_a : Optional[int] = raw_annotations[0]
_a : Any = raw_annotation['''scores''']
_a : Any = raw_annotation['''labels''']
_a : List[str] = raw_annotation['''boxes''']
_a : Union[str, Any] = scores.tolist()
_a : Optional[Any] = [self.model.config.idalabel[label.item()] for label in labels]
_a : Any = [self._get_bounding_box(_a ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
_a : Tuple = ['''score''', '''label''', '''box''']
_a : Optional[int] = [
dict(zip(_a , _a ) )
for vals in zip(raw_annotation['''scores'''] , raw_annotation['''labels'''] , raw_annotation['''boxes'''] )
]
return annotation
def __lowercase ( self , _a ) -> Dict[str, int]:
if self.framework != "pt":
raise ValueError('''The ObjectDetectionPipeline is only available in PyTorch.''' )
_a , _a , _a , _a : List[Any] = box.int().tolist()
_a : Optional[int] = {
'''xmin''': xmin,
'''ymin''': ymin,
'''xmax''': xmax,
'''ymax''': ymax,
}
return bbox
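# --- Hedged usage sketch (added; not part of the pipeline implementation) ---
# Consuming the object-detection pipeline above; the model id and image URL are
# assumptions for illustration (any object-detection checkpoint works). Each
# prediction carries the score/label/box keys built in postprocess():
from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for pred in detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9):
    print(pred["label"], round(pred["score"], 3), pred["box"])  # box: xmin/ymin/xmax/ymax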
| 578
| 0
|
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class _UpperCamelCase (a_ , unittest.TestCase ):
snake_case_ = BlenderbotSmallTokenizer
snake_case_ = False
def __UpperCAmelCase ( self )-> Optional[int]:
super().setUp()
__lowerCAmelCase = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
__lowerCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
__lowerCAmelCase = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
__lowerCAmelCase = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__UpperCamelCase ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__UpperCamelCase ) )
def __UpperCAmelCase ( self , **__UpperCamelCase )-> Any:
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def __UpperCAmelCase ( self , __UpperCamelCase )-> Dict:
__lowerCAmelCase = "adapt act apte"
__lowerCAmelCase = "adapt act apte"
return input_text, output_text
def __UpperCAmelCase ( self )-> str:
__lowerCAmelCase = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCAmelCase = "adapt act apte"
__lowerCAmelCase = ["adapt", "act", "ap@@", "te"]
__lowerCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
__lowerCAmelCase = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
__lowerCAmelCase = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def __UpperCAmelCase ( self )-> Tuple:
__lowerCAmelCase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
assert tok("sam" ).input_ids == [1_3_8_4]
__lowerCAmelCase = "I am a small frog."
__lowerCAmelCase = tok([src_text] , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = tok.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def __UpperCAmelCase ( self )-> List[Any]:
__lowerCAmelCase = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" )
__lowerCAmelCase = "I am a small frog ."
__lowerCAmelCase = "."
__lowerCAmelCase = tok(__UpperCamelCase )["input_ids"]
__lowerCAmelCase = tok(__UpperCamelCase )["input_ids"]
assert encoded[-1] == encoded_dot[0]
| 367
|
from math import pow, sqrt
def __lowerCAmelCase ( *__snake_case ):
__lowerCAmelCase = len(__snake_case ) > 0 and all(value > 0.0 for value in values )
return result
def __lowerCAmelCase ( __snake_case , __snake_case ):
return (
        round(sqrt(molar_mass_a / molar_mass_b ) , 6 )
        if validate(__snake_case , __snake_case )
        else ValueError("Input Error: Molar mass values must be greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
        round(effusion_rate * sqrt(molar_mass_a / molar_mass_b ) , 6 )
        if validate(__snake_case , __snake_case , __snake_case )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
        round(effusion_rate / sqrt(molar_mass_a / molar_mass_b ) , 6 )
        if validate(__snake_case , __snake_case , __snake_case )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
        round(molar_mass / pow(effusion_rate_a / effusion_rate_b , 2 ) , 6 )
        if validate(__snake_case , __snake_case , __snake_case )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
def __lowerCAmelCase ( __snake_case , __snake_case , __snake_case ):
return (
        round(pow(effusion_rate_a / effusion_rate_b , 2 ) / molar_mass , 6 )
        if validate(__snake_case , __snake_case , __snake_case )
        else ValueError(
            "Input Error: Molar mass and effusion rate values must be greater than 0." )
)
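# --- Hedged worked example (added; not part of the original module) ---
# Graham's law: rate_1 / rate_2 = sqrt(M_2 / M_1). Hydrogen (M ~= 2.016 g/mol)
# therefore effuses about four times faster than oxygen (M ~= 31.998 g/mol):
from math import sqrt

molar_mass_h2, molar_mass_o2 = 2.016, 31.998
rate_ratio = sqrt(molar_mass_o2 / molar_mass_h2)
assert round(rate_ratio, 2) == 3.98  # close to the textbook factor of 4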
| 367
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ : int = logging.get_logger(__name__)
lowercase_ : Optional[int] = {
'''transfo-xl-wt103''': '''https://huggingface.co/transfo-xl-wt103/resolve/main/config.json''',
}
class lowercase ( a_ ):
"""simple docstring"""
_UpperCamelCase : Dict = "transfo-xl"
_UpperCamelCase : Dict = ["mems"]
_UpperCamelCase : Union[str, Any] = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : int , lowerCamelCase_ : List[str]=26_77_35 , lowerCamelCase_ : List[str]=[2_00_00, 4_00_00, 20_00_00] , lowerCamelCase_ : List[Any]=10_24 , lowerCamelCase_ : Dict=10_24 , lowerCamelCase_ : Union[str, Any]=16 , lowerCamelCase_ : int=64 , lowerCamelCase_ : List[Any]=40_96 , lowerCamelCase_ : str=4 , lowerCamelCase_ : Optional[Any]=False , lowerCamelCase_ : Optional[int]=18 , lowerCamelCase_ : List[Any]=16_00 , lowerCamelCase_ : Dict=10_00 , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=True , lowerCamelCase_ : Any=0 , lowerCamelCase_ : str=-1 , lowerCamelCase_ : List[Any]=True , lowerCamelCase_ : int=0.1 , lowerCamelCase_ : Optional[int]=0.0 , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[str]="normal" , lowerCamelCase_ : Union[str, Any]=0.01 , lowerCamelCase_ : Optional[int]=0.01 , lowerCamelCase_ : int=0.02 , lowerCamelCase_ : Optional[int]=1e-5 , lowerCamelCase_ : int=0 , **lowerCamelCase_ : List[Any] , ):
'''simple docstring'''
_snake_case : Union[str, Any] = vocab_size
_snake_case : Any = []
self.cutoffs.extend(lowerCamelCase_ )
if proj_share_all_but_first:
_snake_case : Any = [False] + [True] * len(self.cutoffs )
else:
_snake_case : Union[str, Any] = [False] + [False] * len(self.cutoffs )
_snake_case : Any = d_model
_snake_case : Tuple = d_embed
_snake_case : Any = d_head
_snake_case : int = d_inner
_snake_case : Optional[Any] = div_val
_snake_case : Dict = pre_lnorm
_snake_case : Optional[Any] = n_layer
_snake_case : Optional[int] = n_head
_snake_case : Optional[Any] = mem_len
_snake_case : Union[str, Any] = same_length
_snake_case : str = attn_type
_snake_case : str = clamp_len
_snake_case : str = sample_softmax
_snake_case : str = adaptive
_snake_case : int = dropout
_snake_case : Optional[Any] = dropatt
_snake_case : str = untie_r
_snake_case : Dict = init
_snake_case : str = init_range
_snake_case : Optional[Any] = proj_init_std
_snake_case : Optional[Any] = init_std
_snake_case : Any = layer_norm_epsilon
super().__init__(eos_token_id=lowerCamelCase_ , **lowerCamelCase_ )
@property
def __UpperCAmelCase ( self : Union[str, Any] ):
'''simple docstring'''
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
def __UpperCAmelCase ( self : str , lowerCamelCase_ : Union[str, Any] ):
'''simple docstring'''
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 706
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase_ : Optional[Any] = {
'''configuration_roc_bert''': ['''ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoCBertConfig'''],
'''tokenization_roc_bert''': ['''RoCBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ : Tuple = [
'''ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoCBertForCausalLM''',
'''RoCBertForMaskedLM''',
'''RoCBertForMultipleChoice''',
'''RoCBertForPreTraining''',
'''RoCBertForQuestionAnswering''',
'''RoCBertForSequenceClassification''',
'''RoCBertForTokenClassification''',
'''RoCBertLayer''',
'''RoCBertModel''',
'''RoCBertPreTrainedModel''',
'''load_tf_weights_in_roc_bert''',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
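# Descriptive note (editorial sketch): replacing the module object in
# sys.modules with a _LazyModule defers the heavy model imports declared in
# _import_structure until one of those attributes is first accessed.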
| 652
| 0
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 336
|
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4180, -1.5051, -3.4836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
| 336
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def check_same_shape(tensor_list):
    """simple docstring"""
    shapes = [tensor.shape for tensor in tensor_list]
    return all(shape == shapes[0] for shape in shapes[1:])
class StableDiffusionLatentUpscalePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    """simple docstring"""

    pipeline_class = StableDiffusionLatentUpscalePipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "height",
        "width",
        "cross_attention_kwargs",
        "negative_prompt_embeds",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"num_images_per_prompt"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    test_cpu_offload = True
@property
    def dummy_image(self):
        '''simple docstring'''
        batch_size = 1
        num_channels = 4
        sizes = (16, 16)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    def get_dummy_components(self):
        # Note: three argument values below (norm_num_groups, mid_block_type,
        # only_cross_attention) were elided in the source; the values used here
        # are plausible assumptions for the K-upscaler UNet, not confirmed.
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            act_fn="gelu",
            attention_head_dim=8,
            norm_num_groups=None,
            block_out_channels=[32, 32, 64, 64],
            time_cond_proj_dim=160,
            conv_in_kernel=1,
            conv_out_kernel=1,
            cross_attention_dim=32,
            down_block_types=(
                "KDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
                "KCrossAttnDownBlock2D",
            ),
            in_channels=8,
            mid_block_type=None,
            only_cross_attention=True,
            out_channels=5,
            resnet_time_scale_shift="scale_shift",
            time_embedding_type="fourier",
            timestep_post_act="gelu",
            up_block_types=("KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KCrossAttnUpBlock2D", "KUpBlock2D"),
        )
        vae = AutoencoderKL(
            block_out_channels=[32, 32, 64, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=[
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
            ],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        scheduler = EulerDiscreteScheduler(prediction_type="sample")
        text_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="quick_gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": model.eval(),
            "vae": vae.eval(),
            "scheduler": scheduler,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": self.dummy_image.cpu(),
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        '''simple docstring'''
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 256, 256, 3))
        expected_slice = np.array(
            [0.47222412, 0.41921633, 0.44717434, 0.46874192, 0.42588258, 0.46150726, 0.4677534, 0.45583832, 0.48579055]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=7e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=3e-3)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=7e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=3e-3)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=3e-3)
    def test_karras_schedulers_shape(self):
        skip_schedulers = [
            "DDIMScheduler",
            "DDPMScheduler",
            "PNDMScheduler",
            "HeunDiscreteScheduler",
            "EulerAncestralDiscreteScheduler",
            "KDPM2DiscreteScheduler",
            "KDPM2AncestralDiscreteScheduler",
            "DPMSolverSDEScheduler",
        ]
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)

        # make sure that PNDM does not need warm-up
        pipe.scheduler.register_to_config(skip_prk_steps=True)

        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = 2

        outputs = []
        for scheduler_enum in KarrasDiffusionSchedulers:
            if scheduler_enum.name in skip_schedulers:
                # no sigma schedulers are not supported
                continue

            scheduler_cls = getattr(diffusers, scheduler_enum.name)
            pipe.scheduler = scheduler_cls.from_config(pipe.scheduler.config)
            output = pipe(**inputs)[0]
            outputs.append(output)

        assert check_same_shape(outputs)
@require_torch_gpu
@slow
class StableDiffusionLatentUpscalePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_latent_upscaler_fp16(self):
        generator = torch.manual_seed(33)

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", torch_dtype=torch.float16)
        pipe.to("cuda")

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "a photo of an astronaut high resolution, unreal engine, ultra realistic"

        low_res_latents = pipe(prompt, generator=generator, output_type="latent").images

        image = upscaler(
            prompt=prompt,
            image=low_res_latents,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy"
        )
        assert np.abs((expected_image - image).mean()) < 5e-2

    def test_latent_upscaler_fp16_image(self):
        generator = torch.manual_seed(33)

        upscaler = StableDiffusionLatentUpscalePipeline.from_pretrained(
            "stabilityai/sd-x2-latent-upscaler", torch_dtype=torch.float16
        )
        upscaler.to("cuda")

        prompt = "the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas"

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png"
        )

        image = upscaler(
            prompt=prompt,
            image=image,
            num_inference_steps=20,
            guidance_scale=0,
            generator=generator,
            output_type="np",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-2
| 711
|
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTV2Config
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation, MobileViTV2Model
    from transformers.models.mobilevitv2.modeling_mobilevitv2 import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTV2Config(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTV2Model, MobileViTV2ForImageClassification, MobileViTV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTV2Model,
            "image-classification": MobileViTV2ForImageClassification,
            "image-segmentation": MobileViTV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTV2ModelTester(self)
        self.config_tester = MobileViTV2ConfigTester(self, config_class=MobileViTV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTV2ForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 630
| 0
|
import datasets
from .evaluate import evaluate
_CITATION = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
_DESCRIPTION = """
This metric wrap the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
_KWARGS_DESCRIPTION = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Squad(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': {'''id''': datasets.Value('''string''' ), '''prediction_text''': datasets.Value('''string''' )},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ) , codebase_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , reference_urls=['''https://rajpurkar.github.io/SQuAD-explorer/'''] , )
    def _compute(self, predictions, references):
        '''simple docstring'''
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset, predictions=pred_dict)
        return score
| 175
|
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
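# A quick sanity check (a sketch, not part of the original module): the
# maximum-product subarray of [2, 3, -2, 4] is [2, 3] with product 6, and a
# zero resets the running products.
if __name__ == "__main__":
    assert max_product_subarray([2, 3, -2, 4]) == 6
    assert max_product_subarray([-2, 0, -1]) == 0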
| 175
| 1
|
'''simple docstring'''
def actual_power(a: int, b: int) -> int:
    '''Computes a**b by divide and conquer.'''
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    # For negative exponents, actual_power still returns a**abs(b) because
    # int(b / 2) truncates toward zero, so the reciprocal yields a**b.
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
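# Hedged aside: actual_power recurses twice per level, so it performs O(b)
# multiplications despite halving b. Caching the half-power restores the
# classic O(log b) square-and-multiply behaviour:
def fast_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    half = fast_power(a, int(b / 2))
    return half * half if b % 2 == 0 else a * half * half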
| 708
|
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xlm-roberta-xl': 'https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json',
'facebook/xlm-roberta-xxl': 'https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json',
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(
        self,
        vocab_size=250880,
        hidden_size=2560,
        num_hidden_layers=36,
        num_attention_heads=32,
        intermediate_size=10240,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 223
| 0
|
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
doctest.testmod()
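# Worked check (editorial sketch): with first_term=1, common_diff=1 and
# num_of_terms=10, S_n = n/2 * (2a + (n - 1)d) = 10/2 * (2 + 9) = 55.0.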
| 63
|
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds the root of func (an expression in x) closest to the initial guess a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
# Find Square Root of 5
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 578
| 0
|
from __future__ import annotations
from PIL import Image
# Define glider example
GLIDER = [
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
]
# Define blinker example
BLINKER = [[0, 1, 0], [0, 1, 0], [0, 1, 0]]
def new_generation(cells: list[list[int]]) -> list[list[int]]:
    '''simple docstring'''
    next_generation = []
    for i in range(len(cells)):
        next_generation_row = []
        for j in range(len(cells[i])):
            # Get the number of live neighbours
            neighbour_count = 0
            if i > 0 and j > 0:
                neighbour_count += cells[i - 1][j - 1]
            if i > 0:
                neighbour_count += cells[i - 1][j]
            if i > 0 and j < len(cells[i]) - 1:
                neighbour_count += cells[i - 1][j + 1]
            if j > 0:
                neighbour_count += cells[i][j - 1]
            if j < len(cells[i]) - 1:
                neighbour_count += cells[i][j + 1]
            if i < len(cells) - 1 and j > 0:
                neighbour_count += cells[i + 1][j - 1]
            if i < len(cells) - 1:
                neighbour_count += cells[i + 1][j]
            if i < len(cells) - 1 and j < len(cells[i]) - 1:
                neighbour_count += cells[i + 1][j + 1]

            # Rules of the game of life (excerpt from Wikipedia):
            # 1. Any live cell with two or three live neighbours survives.
            # 2. Any dead cell with three live neighbours becomes a live cell.
            # 3. All other live cells die in the next generation.
            #    Similarly, all other dead cells stay dead.
            alive = cells[i][j] == 1
            if (alive and 2 <= neighbour_count <= 3) or (not alive and neighbour_count == 3):
                next_generation_row.append(1)
            else:
                next_generation_row.append(0)

        next_generation.append(next_generation_row)
    return next_generation


def generate_images(cells: list[list[int]], frames: int) -> list[Image.Image]:
    '''simple docstring'''
    images = []
    for _ in range(frames):
        # Create output image
        img = Image.new("RGB", (len(cells[0]), len(cells)))
        pixels = img.load()

        # Save cells to image
        for x in range(len(cells)):
            for y in range(len(cells[0])):
                colour = 255 - cells[y][x] * 255
                pixels[x, y] = (colour, colour, colour)

        # Save image
        images.append(img)
        cells = new_generation(cells)
    return images
if __name__ == "__main__":
    images = generate_images(GLIDER, 16)
    images[0].save("out.gif", save_all=True, append_images=images[1:])
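    # Extra sanity check (sketch, not part of the original script): the blinker
    # oscillates with period 2, so two generations restore the starting pattern.
    assert new_generation(new_generation(BLINKER)) == BLINKER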
| 706
|
from __future__ import annotations
from scipy.special import comb # type: ignore
class BezierCurve:
    """Bezier curve over a list of 2D control points."""

    def __init__(self, list_of_points: list[tuple[float, float]]):
        """list_of_points: the control points in 2D defining the curve."""
        self.list_of_points = list_of_points
        # Degree determines the flexibility of the curve.
        # Degree = 1 will produce a straight line.
        self.degree = len(list_of_points) - 1

    def basis_function(self, t: float) -> list[float]:
        """Returns the Bernstein basis values at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."
        output_values: list[float] = []
        for i in range(len(self.list_of_points)):
            # basis function for each i
            output_values.append(
                comb(self.degree, i) * ((1 - t) ** (self.degree - i)) * (t**i)
            )
        # the basis must sum up to 1 for it to produce a valid Bezier curve.
        assert round(sum(output_values), 5) == 1
        return output_values

    def bezier_curve_function(self, t: float) -> tuple[float, float]:
        """The point on the curve at parameter t."""
        assert 0 <= t <= 1, "Time t must be between 0 and 1."

        basis_function = self.basis_function(t)
        x = 0.0
        y = 0.0
        for i in range(len(self.list_of_points)):
            # For all points, sum up the product of i-th basis function and i-th point.
            x += basis_function[i] * self.list_of_points[i][0]
            y += basis_function[i] * self.list_of_points[i][1]
        return (x, y)

    def plot_curve(self, step_size: float = 0.01):
        """Plots the curve and its control points with matplotlib."""
        from matplotlib import pyplot as plt  # type: ignore

        to_plot_x: list[float] = []  # x coordinates of points to plot
        to_plot_y: list[float] = []  # y coordinates of points to plot

        t = 0.0
        while t <= 1:
            value = self.bezier_curve_function(t)
            to_plot_x.append(value[0])
            to_plot_y.append(value[1])
            t += step_size

        x = [i[0] for i in self.list_of_points]
        y = [i[1] for i in self.list_of_points]

        plt.plot(to_plot_x, to_plot_y, color="blue", label="Curve of Degree " + str(self.degree))
        plt.scatter(x, y, color="red", label="Control Points")
        plt.legend()
        plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
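    # Hedged worked example: for the degree-1 curve through (1, 2) and (3, 5),
    # the Bernstein basis at t = 0.5 is [0.5, 0.5], so the curve point is the
    # control point average:
    #     BezierCurve([(1, 2), (3, 5)]).bezier_curve_function(0.5)  # -> (2.0, 3.5)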
| 235
| 0
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"caidas/swin2sr-classicalsr-x2-64": (
"https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
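# Minimal usage sketch (hedged: exercising only fields defined above):
#
#     config = Swin2SRConfig(upscale=4)
#     config.num_layers  # -> 6, derived from len(depths) rather than passed in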
| 163
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
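    # Hedged worked example: the default attention_types=[[["global", "local"], 12]]
    # expands to ["global", "local"] repeated 12 times — an alternating 24-entry
    # pattern that lines up with the default num_layers=24.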
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation to pick the largest divisor of seq_length below window_size."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 656
| 0
|
from __future__ import annotations
from random import choice
def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the k-th smallest element of lst (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)
if __name__ == "__main__":
import doctest
doctest.testmod()
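# Hedged usage sketch: the 3rd smallest element of [2, 1, 3, 4, 5] is 3. Note
# that the partition drops elements equal to the pivot, so the helper assumes
# distinct values.
#
#     kth_number([2, 1, 3, 4, 5], 3)  # -> 3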
| 470
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 470
| 1
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[int]:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=__a ) as mock_head:
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def __lowerCAmelCase ( self ) -> str:
# A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
# Download this model to make sure it's in the cache.
__SCREAMING_SNAKE_CASE = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=__a ) as mock_head:
__SCREAMING_SNAKE_CASE = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def __lowerCAmelCase ( self ) -> str:
# This test is for deprecated behavior and can be removed in v5
try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb" ) as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f )
            _ = AlbertTokenizer.from_pretrained(tmp_file )
        finally:
            os.remove(tmp_file )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json", "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", __a )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 10_00 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
# This test is for deprecated behavior and can be removed in v5
__SCREAMING_SNAKE_CASE = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
SCREAMING_SNAKE_CASE__ =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou''']
@classmethod
def __lowerCAmelCase ( cls ) -> int:
__SCREAMING_SNAKE_CASE = TOKEN
HfFolder.save_token(__a )
@classmethod
def __lowerCAmelCase ( cls ) -> Optional[Any]:
try:
delete_repo(token=cls._token, repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def __lowerCAmelCase ( self ) -> Tuple:
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = os.path.join(__a, "vocab.txt" )
with open(__a, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__SCREAMING_SNAKE_CASE = BertTokenizer(__a )
tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__a, repo_id="test-tokenizer", push_to_hub=__a, use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained(f'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def __lowerCAmelCase ( self ) -> List[str]:
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = os.path.join(__a, "vocab.txt" )
with open(__a, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__SCREAMING_SNAKE_CASE = BertTokenizer(__a )
tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__a, repo_id="valid_org/test-tokenizer-org", push_to_hub=__a, use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def __lowerCAmelCase ( self ) -> Any:
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = os.path.join(__a, "vocab.txt" )
with open(__a, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__SCREAMING_SNAKE_CASE = CustomTokenizer(__a )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=__a )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
__SCREAMING_SNAKE_CASE = os.path.join(__a, "vocab.txt" )
with open(__a, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained(__a )
bert_tokenizer.save_pretrained(__a )
__SCREAMING_SNAKE_CASE = CustomTokenizerFast.from_pretrained(__a )
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(f'''{USER}/test-dynamic-tokenizer''', trust_remote_code=__a )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" )
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
f'''{USER}/test-dynamic-tokenizer''', use_fast=__a, trust_remote_code=__a )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> List[Any]:
__SCREAMING_SNAKE_CASE = Trie()
trie.add("Hello ๅ้" )
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"ๅ": {"้": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"ๅ": {"้": {"": 1}}}}}}}}} )
def __lowerCAmelCase ( self ) -> Any:
__SCREAMING_SNAKE_CASE = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS]", " This is a ", "extra_id_100"] )
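        # `split` prefers the longest registered token, so "extra_id_100" wins
        # over its prefix "extra_id_1" in the assertion above.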
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ), ["A", "BC"] )
self.assertEqual(trie.split("BCA" ), ["BC", "A"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def __lowerCAmelCase ( self ) -> str:
__SCREAMING_SNAKE_CASE = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ), ["AB", "C"] )
def __lowerCAmelCase ( self ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ), ["ABC", "D"] )
def __lowerCAmelCase ( self ) -> Dict:
# Even if the offsets are wrong, we necessarily output correct string
# parts.
__SCREAMING_SNAKE_CASE = Trie()
__SCREAMING_SNAKE_CASE = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3] )
self.assertEqual(__a, ["AB", "C"] )
| 693
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
"""simple docstring"""
def __init__( self: Union[str, Any] , __a: int , )-> Dict:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def a__ ( self: Optional[int] )-> str:
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : int = None
if self.use_input_mask:
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase : str = None
lowerCamelCase : Dict = None
lowerCamelCase : Optional[Any] = None
if self.use_labels:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : Union[str, Any] = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def a__ ( self: Any )-> List[Any]:
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
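        # encoder_hidden_states / encoder_attention_mask stand in for the output of
        # an upstream encoder so the model can be exercised as a cross-attending decoder.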
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def a__ ( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self: List[str] , __a: Any , __a: List[Any] , __a: Tuple , __a: Tuple , __a: int , __a: Optional[int] , __a: str , __a: Optional[int] , )-> Dict:
lowerCamelCase : str = True
lowerCamelCase : List[str] = TFEsmModel(config=__a )
lowerCamelCase : Dict = {
"""input_ids""": input_ids,
"""attention_mask""": input_mask,
"""encoder_hidden_states""": encoder_hidden_states,
"""encoder_attention_mask""": encoder_attention_mask,
}
lowerCamelCase : List[Any] = model(__a )
lowerCamelCase : int = [input_ids, input_mask]
lowerCamelCase : int = model(__a , encoder_hidden_states=__a )
# Also check the case where encoder outputs are not passed
lowerCamelCase : List[Any] = model(__a , attention_mask=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self: Optional[int] , __a: Optional[Any] , __a: Tuple , __a: List[str] , __a: Tuple , __a: Optional[Any] , __a: str )-> Any:
lowerCamelCase : Optional[int] = TFEsmForMaskedLM(config=__a )
lowerCamelCase : Dict = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self: Dict , __a: Optional[Any] , __a: Any , __a: int , __a: Optional[int] , __a: str , __a: List[Any] )-> Any:
lowerCamelCase : int = self.num_labels
lowerCamelCase : Dict = TFEsmForTokenClassification(config=__a )
lowerCamelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
lowerCamelCase : Dict = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self: Dict )-> str:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class A__ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
snake_case__ : Optional[Any] =(
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
snake_case__ : int =(
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
snake_case__ : Optional[Any] =False
snake_case__ : Any =False
def a__ ( self: Any )-> Optional[int]:
lowerCamelCase : Optional[Any] = TFEsmModelTester(self )
lowerCamelCase : Tuple = ConfigTester(self , config_class=__a , hidden_size=37 )
def a__ ( self: Optional[int] )-> Tuple:
self.config_tester.run_common_tests()
def a__ ( self: str )-> Any:
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def a__ ( self: Any )-> Dict:
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*__a )
def a__ ( self: Tuple )-> Tuple:
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__a )
def a__ ( self: int )-> Optional[Any]:
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__a )
@slow
def a__ ( self: Union[str, Any] )-> str:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Tuple = TFEsmModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@unittest.skip("""Protein models do not support embedding resizing.""" )
def a__ ( self: List[Any] )-> Optional[Any]:
pass
@unittest.skip("""Protein models do not support embedding resizing.""" )
def a__ ( self: Tuple )-> str:
pass
def a__ ( self: List[Any] )-> Tuple:
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name , dict )
                for k, v in name.items():
                    assert isinstance(v , tf.Variable )
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class A__ ( unittest.TestCase):
"""simple docstring"""
@slow
def a__ ( self: Any )-> Tuple:
lowerCamelCase : List[Any] = TFEsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCamelCase : List[str] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCamelCase : Union[str, Any] = model(__a )[0]
lowerCamelCase : Union[str, Any] = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , __a )
# compare the actual values for a slice.
lowerCamelCase : Any = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2 ) )
@slow
def a__ ( self: List[Any] )-> Optional[int]:
lowerCamelCase : int = TFEsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
lowerCamelCase : Any = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
lowerCamelCase : List[str] = model(__a )[0]
# compare the actual values for a slice.
lowerCamelCase : str = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 222
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__lowerCamelCase = logging.getLogger(__name__)
class _UpperCamelCase( RagRetriever ):
    def __init__( self , config , question_encoder_tokenizer , generator_tokenizer , index=None ):
        super().__init__(
            config , question_encoder_tokenizer=question_encoder_tokenizer , generator_tokenizer=generator_tokenizer , index=index , init_retrieval=False , )
        self.process_group = None
    def init_retrieval( self , distributed_port ):
        logger.info("initializing retrieval" )
        # initializing a separate process group for retrieval as the default
        # nccl backend doesn't support gather/scatter operations while gloo
        # is too slow to replace nccl for the core gpu communication
        if dist.is_initialized():
            logger.info("dist initialized" )
            # needs to be set manually
            os.environ["GLOO_SOCKET_IFNAME"] = self._infer_socket_ifname()
            # avoid clash with the NCCL port
            os.environ["MASTER_PORT"] = str(distributed_port + 1 )
            self.process_group = dist.new_group(ranks=None , backend="gloo" )
        # initialize retriever only on the main worker
        if not dist.is_initialized() or self._is_main():
            logger.info("dist not initialized / main" )
            self.index.init_index()
        # all processes wait until the retriever is initialized by the main process
        if dist.is_initialized():
            torch.distributed.barrier(group=self.process_group )
    def _is_main( self ):
        return dist.get_rank(group=self.process_group ) == 0
    def _scattered( self , scatter_list , target_shape , target_type=torch.float32 ):
        # Scatter chunks of the main worker's tensor to every worker in the
        # retrieval process group; non-main workers pass scatter_list=None.
        target_tensor = torch.empty(target_shape , dtype=target_type )
        dist.scatter(target_tensor , src=0 , scatter_list=scatter_list , group=self.process_group )
        return target_tensor
    def _infer_socket_ifname( self ):
        addrs = psutil.net_if_addrs()
        # a hacky way to deal with varying network interface names
        ifname = next((addr for addr in addrs if addr.startswith("e" )) , None )
        return ifname
    def retrieve( self , question_hidden_states: np.ndarray , n_docs: int ):
        # single GPU training
        if not dist.is_initialized():
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states , n_docs )
            return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids )
        # distributed training
        world_size = dist.get_world_size(group=self.process_group )
        # gather logic
        gather_list = None
        if self._is_main():
            gather_list = [torch.empty(question_hidden_states.shape , dtype=torch.float32 ) for _ in range(world_size )]
        dist.gather(torch.tensor(question_hidden_states ) , dst=0 , gather_list=gather_list , group=self.process_group )
        # scatter logic
        n_queries = question_hidden_states.shape[0]
        scatter_ids = []
        scatter_vectors = []
        if self._is_main():
            assert len(gather_list ) == world_size
            ids, vectors = self._main_retrieve(torch.cat(gather_list ).numpy() , n_docs )
            ids, vectors = torch.tensor(ids ), torch.tensor(vectors )
            scatter_ids = self._chunk_tensor(ids , n_queries )
            scatter_vectors = self._chunk_tensor(vectors , n_queries )
        doc_ids = self._scattered(scatter_ids , [n_queries, n_docs] , target_type=torch.int64 )
        retrieved_doc_embeds = self._scattered(scatter_vectors , [n_queries, n_docs, question_hidden_states.shape[1]] )
        return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(doc_ids )
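# Data flow of `retrieve` under torch.distributed (a sketch, assuming 2 workers):
#   all workers -> dist.gather: question embeddings collected on the main worker
#   main worker -> queries the index once for the whole gathered batch
#   all workers <- dist.scatter (via `_scattered`): per-worker doc ids / embeddings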
| 328
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _UpperCamelCase( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """CLIPImageProcessor"""
    tokenizer_class = ("""CLIPTokenizer""", """CLIPTokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["""pixel_values"""] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
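# Minimal usage sketch (hypothetical tokenizer / image-processor instances):
#   processor = _UpperCamelCase(image_processor=clip_image_processor, tokenizer=clip_tokenizer)
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt")
#   # -> input_ids / attention_mask from the tokenizer plus pixel_values from the
#   # image processor, merged into one BatchEncoding.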
| 328
| 1
|
'''simple docstring'''
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, TaEncoderModel
from diffusers import DDPMScheduler, UNetaDConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class __magic_name__ :
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Any = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__A : int = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__A : List[str] = UNetaDConditionModel(
sample_size=32 , layers_per_block=1 , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=3 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__A : Tuple = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__A : Any = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
__A : Optional[int] = TaEncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__A : Dict = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5" )
torch.manual_seed(0 )
__A : str = UNetaDConditionModel(
sample_size=32 , layers_per_block=[1, 2] , block_out_channels=[32, 64] , down_block_types=[
"ResnetDownsampleBlock2D",
"SimpleCrossAttnDownBlock2D",
] , mid_block_type="UNetMidBlock2DSimpleCrossAttn" , up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"] , in_channels=6 , out_channels=6 , cross_attention_dim=32 , encoder_hid_dim=32 , attention_head_dim=8 , addition_embed_type="text" , addition_embed_type_num_heads=2 , cross_attention_norm="group_norm" , resnet_time_scale_shift="scale_shift" , act_fn="gelu" , class_embed_type="timestep" , mid_block_scale_factor=1.414 , time_embedding_act_fn="gelu" , time_embedding_dim=32 , )
unet.set_attn_processor(AttnAddedKVProcessor() ) # For reproducibility tests
torch.manual_seed(0 )
__A : Optional[int] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , thresholding=lowerCamelCase , dynamic_thresholding_ratio=0.95 , sample_max_value=1.0 , prediction_type="epsilon" , variance_type="learned_range" , )
torch.manual_seed(0 )
__A : List[str] = DDPMScheduler(
num_train_timesteps=1000 , beta_schedule="squaredcos_cap_v2" , beta_start=0.0001 , beta_end=0.02 , )
torch.manual_seed(0 )
__A : List[Any] = IFWatermarker()
return {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"image_noising_scheduler": image_noising_scheduler,
"watermarker": watermarker,
"safety_checker": None,
"feature_extractor": None,
}
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None
        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt )
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe , optional_component , None )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded , optional_component ) is None , f"`{optional_component}` did not stay set to None after loading." , )
        inputs = self.get_dummy_inputs(torch_device )
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(torch_device )
        output = pipe(**inputs )[0]
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir )
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir )
        pipe_loaded.to(torch_device )
        pipe_loaded.set_progress_bar_config(disable=None )
        pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor() )  # For reproducibility tests
        inputs = self.get_dummy_inputs(torch_device )
        output_loaded = pipe_loaded(**inputs )[0]
        max_diff = np.abs(to_np(output ) - to_np(output_loaded ) ).max()
        self.assertLess(max_diff , 1E-4 )
| 111
|
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_lengths=True , use_token_type_ids=True , use_labels=True , gelu_activation=True , sinusoidal_embeddings=False , causal=False , asm=False , n_langs=2 , vocab_size=99 , n_special=0 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=2 , num_choices=4 , summary_type="last" , use_proj=True , scope=None , bos_token_id=0 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : int = random_attention_mask([self.batch_size, self.seq_length] )
__A : Union[str, Any] = None
if self.use_input_lengths:
__A : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__A : Any = None
if self.use_token_type_ids:
__A : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__A : int = None
__A : Any = None
__A : Tuple = None
if self.use_labels:
__A : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : int = ids_tensor([self.batch_size] , 2 ).float()
__A : List[str] = ids_tensor([self.batch_size] , self.num_choices )
__A : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def lowerCAmelCase__ ( self ):
'''simple docstring'''
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__A : Tuple = XLMModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : Tuple = model(lowerCamelCase , lengths=lowerCamelCase , langs=lowerCamelCase )
__A : Dict = model(lowerCamelCase , langs=lowerCamelCase )
__A : List[Any] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__A : str = XLMWithLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : List[str] = model(lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__A : str = XLMForQuestionAnsweringSimple(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : int = model(lowerCamelCase )
__A : List[str] = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
__A : List[Any] = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__A : Optional[Any] = XLMForQuestionAnswering(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : str = model(lowerCamelCase )
__A : Optional[int] = model(
lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , p_mask=lowerCamelCase , )
__A : Any = model(
lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , )
((__A) ,) : List[str] = result_with_labels.to_tuple()
__A : int = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
((__A) ,) : List[Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__A : Optional[int] = XLMForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : str = model(lowerCamelCase )
__A : Optional[int] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__A : str = self.num_labels
__A : int = XLMForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : str = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , ):
'''simple docstring'''
__A : Union[str, Any] = self.num_choices
__A : Union[str, Any] = XLMForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__A : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__A : Any = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths}
return config, inputs_dict
@require_torch
class __magic_name__ ( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase__ = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowerCamelCase__ = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=False ):
'''simple docstring'''
__A : Optional[Any] = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
__A : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
__A : Tuple = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
return inputs_dict
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = XLMModelTester(self )
__A : Any = ConfigTester(self , config_class=lowerCamelCase , emb_dim=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1 ):
'''simple docstring'''
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertListEqual(
[isinstance(lowerCamelCase , lowerCamelCase ) for iter_attentions in attentions] , [True] * len(lowerCamelCase ) )
self.assertEqual(len(lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(lowerCamelCase ):
# adds PAD dummy token
__A : Optional[Any] = min_length + idx + 1
__A : Optional[int] = min_length + idx + 1
__A : List[Any] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(lowerCamelCase ) )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=False , lowerCamelCase=1 ):
'''simple docstring'''
self.assertIsInstance(lowerCamelCase , lowerCamelCase )
self.assertListEqual(
[isinstance(lowerCamelCase , lowerCamelCase ) for iter_hidden_states in hidden_states] , [True] * len(lowerCamelCase ) , )
self.assertEqual(len(lowerCamelCase ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(lowerCamelCase ):
# adds PAD dummy token
__A : Dict = min_length + idx + 1
__A : Union[str, Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(lowerCamelCase ) , )
pass
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__A : Any = XLMModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@require_torch
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Tuple = XLMWithLMHeadModel.from_pretrained("xlm-mlm-en-2048" )
model.to(lowerCamelCase )
__A : Optional[int] = torch.tensor([[14, 447]] , dtype=torch.long , device=lowerCamelCase ) # the president
__A : Optional[Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
__A : Union[str, Any] = model.generate(lowerCamelCase , do_sample=lowerCamelCase )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , lowerCamelCase )
| 111
| 1
|
"""simple docstring"""
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("""All input parameters must be positive""")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("""Relative densities cannot be greater than one""")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
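# Sanity check: at redshift 0 the bracketed sum is exactly
# radiation + matter + curvature + dark_energy = 1, so the function
# returns the Hubble constant itself (as the demo below illustrates).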
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
_lowerCAmelCase = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 714
|
"""simple docstring"""
from math import factorial, pi
def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_sin() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_sin() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    if not isinstance(theta, (int, float)):
        raise ValueError("""maclaurin_cos() requires either an int or float for theta""")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("""maclaurin_cos() requires a positive int for accuracy""")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))
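# The series used above, truncated at `accuracy` terms:
#   sin(x) = sum_{r>=0} (-1)^r * x^(2r+1) / (2r+1)!
#   cos(x) = sum_{r>=0} (-1)^r * x^(2r)   / (2r)!
# Reducing theta modulo 2*pi first keeps the truncated series accurate.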
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 348
| 0
|
'''simple docstring'''
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : List[Any] , a__ : Optional[int] , a__ : List[str]=99 , a__ : str=13 , a__ : List[Any]=7 , a__ : Union[str, Any]=9 , a__ : int=True , a__ : Optional[Any]=True , a__ : Union[str, Any]=False , a__ : List[Any]=32 , a__ : Dict=5 , a__ : Dict=4 , a__ : int=37 , a__ : List[Any]=8 , a__ : List[str]=0.1 , a__ : Dict=0.002 , a__ : Dict=1 , a__ : Optional[int]=0 , a__ : Tuple=0 , a__ : int=None , a__ : Any=None , ):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = encoder_seq_length
UpperCAmelCase = decoder_seq_length
# For common tests
UpperCAmelCase = self.decoder_seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_attention_mask
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = d_ff
UpperCAmelCase = relative_attention_num_buckets
UpperCAmelCase = dropout_rate
UpperCAmelCase = initializer_factor
UpperCAmelCase = eos_token_id
UpperCAmelCase = pad_token_id
UpperCAmelCase = decoder_start_token_id
UpperCAmelCase = None
UpperCAmelCase = decoder_layers
def __snake_case ( self : Tuple ):
return TaConfig.from_pretrained('''google/umt5-base''' )
def __snake_case ( self : Optional[Any] , a__ : Any , a__ : Tuple , a__ : Union[str, Any] , a__ : List[Any]=None , a__ : Union[str, Any]=None , a__ : int=None , a__ : List[str]=None , a__ : Dict=None , ):
if attention_mask is None:
UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=a__ )
if decoder_head_mask is None:
UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=a__ )
if cross_attn_head_mask is None:
UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=a__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def __snake_case ( self : List[str] ):
UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCAmelCase = self.get_config()
UpperCAmelCase = config.num_attention_heads
UpperCAmelCase = self.prepare_inputs_dict(a__ , a__ , a__ )
return config, input_dict
def __snake_case ( self : Any ):
UpperCAmelCase, UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def __snake_case ( self : Optional[int] ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __snake_case ( self : Tuple ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def __snake_case ( self : List[str] , a__ : List[str] , a__ : List[str] , a__ : List[Any] , a__ : Any , a__ : Tuple , a__ : List[Any] , ):
UpperCAmelCase = UMTaModel(config=a__ )
model.to(a__ )
model.eval()
UpperCAmelCase = model(
input_ids=a__ , decoder_input_ids=a__ , attention_mask=a__ , decoder_attention_mask=a__ , )
UpperCAmelCase = model(input_ids=a__ , decoder_input_ids=a__ )
UpperCAmelCase = result.last_hidden_state
UpperCAmelCase = result.past_key_values
UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(a__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def __snake_case ( self : Optional[Any] , a__ : str , a__ : Dict , a__ : Dict , a__ : int , a__ : Tuple , a__ : int , ):
UpperCAmelCase = UMTaModel(config=a__ ).get_decoder().to(a__ ).eval()
# first forward pass
UpperCAmelCase = model(a__ , use_cache=a__ )
UpperCAmelCase = model(a__ )
UpperCAmelCase = model(a__ , use_cache=a__ )
self.parent.assertTrue(len(a__ ) == len(a__ ) )
self.parent.assertTrue(len(a__ ) == len(a__ ) + 1 )
UpperCAmelCase, UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase = model(a__ )['''last_hidden_state''']
UpperCAmelCase = model(a__ , past_key_values=a__ )['''last_hidden_state''']
# select random slice
UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(a__ , a__ , atol=1e-3 ) )
def __snake_case ( self : Optional[int] , a__ : Tuple , a__ : Dict , ):
UpperCAmelCase = UMTaModel(config=a__ ).to(a__ ).half().eval()
UpperCAmelCase = model(**a__ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(a__ ).any().item() )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase =(
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
_lowerCamelCase =(UMTaForConditionalGeneration,) if is_torch_available() else ()
_lowerCamelCase =(
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
_lowerCamelCase =True
_lowerCamelCase =False
_lowerCamelCase =False
_lowerCamelCase =True
_lowerCamelCase =True
# The small UMT5 model needs higher percentages for CPU/MP tests
_lowerCamelCase =[0.8, 0.9]
def __snake_case ( self : Optional[int] ):
UpperCAmelCase = UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def __snake_case ( self : Any ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(a__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
a__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"{tmpdirname}/t5_test.onnx" , export_params=a__ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
    @unittest.skipIf(torch_device == '''cpu''' , '''Can't do half precision''' )
def __snake_case ( self : List[str] ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*a__ )
def __snake_case ( self : List[Any] ):
UpperCAmelCase = ['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase = config_and_inputs[0]
UpperCAmelCase = UMTaForConditionalGeneration(a__ ).eval()
model.to(a__ )
UpperCAmelCase = {
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=a__ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=a__ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=a__ ),
}
for attn_name, (name, mask) in zip(a__ , head_masking.items() ):
UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=a__ )
UpperCAmelCase = model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=a__ , return_dict_in_generate=a__ , **a__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def __snake_case ( self : Tuple ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def __snake_case ( self : Dict ):
UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=a__ ).to(a__ )
UpperCAmelCase = AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=a__ , legacy=a__ )
UpperCAmelCase = [
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
UpperCAmelCase = tokenizer(a__ , return_tensors='''pt''' , padding=a__ ).input_ids
# fmt: off
UpperCAmelCase = torch.tensor(
[
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
torch.testing.assert_allclose(a__ , a__ )
UpperCAmelCase = model.generate(input_ids.to(a__ ) )
UpperCAmelCase = [
            '''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
            '''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
UpperCAmelCase = tokenizer.batch_decode(a__ )
self.assertEqual(a__ , a__ )
| 51
|
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api() -> None:
    def is_public(name: str) -> bool:
        return not name.startswith("_")
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
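# Illustrative only: a tiny sketch of the dict-like protocol the parametrized
# operations above exercise. HashMap is driven through the same getitem,
# setitem and delitem hooks as a plain dict.
def _usage_sketch() -> None:
    hm = HashMap(initial_block_size=4)
    hm['key_a'] = 'val_a'  # setitem
    assert hm['key_a'] == 'val_a'  # getitem
    del hm['key_a']  # delitem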
| 692
| 0
|
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase__ ( _UpperCAmelCase ):
def __init__( self : Tuple , *A_ : Union[str, Any] , A_ : Optional[Any]=None , A_ : Optional[Any]=None , **A_ : Dict ):
'''simple docstring'''
super().__init__(*lowercase__ , **lowercase__ )
__lowercase = eval_examples
__lowercase = post_process_function
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : int = None , A_ : Tuple=None , A_ : List[str] = None , A_ : str = "eval" , **A_ : Optional[int] , ):
'''simple docstring'''
__lowercase = gen_kwargs.copy()
__lowercase = (
gen_kwargs["max_length"] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
__lowercase = (
gen_kwargs["num_beams"] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
__lowercase = gen_kwargs
__lowercase = self.eval_dataset if eval_dataset is None else eval_dataset
__lowercase = self.get_eval_dataloader(lowercase__ )
__lowercase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase = self.compute_metrics
__lowercase = None
__lowercase = time.time()
__lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase = eval_loop(
lowercase__ , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , metric_key_prefix=lowercase__ , )
finally:
__lowercase = compute_metrics
__lowercase = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase__ , lowercase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__lowercase = self.post_process_function(lowercase__ , lowercase__ , lowercase__ )
__lowercase = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__lowercase = metrics.pop(lowercase__ )
metrics.update(output.metrics )
else:
__lowercase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(lowercase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__lowercase = self.callback_handler.on_evaluate(self.args , self.state , self.control , lowercase__ )
return metrics
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : List[str]=None , A_ : Tuple = "test" , **A_ : Union[str, Any] ):
'''simple docstring'''
__lowercase = gen_kwargs.copy()
__lowercase = self.get_test_dataloader(lowercase__ )
# Temporarily disable metric computation, we will do it in the loop here.
__lowercase = self.compute_metrics
__lowercase = None
__lowercase = time.time()
__lowercase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
__lowercase = eval_loop(
lowercase__ , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=lowercase__ , metric_key_prefix=lowercase__ , )
finally:
__lowercase = compute_metrics
__lowercase = self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
lowercase__ , lowercase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__lowercase = self.post_process_function(lowercase__ , lowercase__ , lowercase__ , """predict""" )
__lowercase = self.compute_metrics(lowercase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
__lowercase = metrics.pop(lowercase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=lowercase__ )
| 702
|
"""simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class lowerCamelCase__ ( _a ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = tempfile.mkdtemp()
__lowercase = 5
# Realm tok
__lowercase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""test""",
"""question""",
"""this""",
"""is""",
"""the""",
"""first""",
"""second""",
"""third""",
"""fourth""",
"""fifth""",
"""record""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__lowercase = os.path.join(self.tmpdirname , """realm_tokenizer""" )
os.makedirs(A_ , exist_ok=A_ )
__lowercase = os.path.join(A_ , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__lowercase = os.path.join(self.tmpdirname , """realm_block_records""" )
os.makedirs(A_ , exist_ok=A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , """realm_tokenizer""" ) )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
__lowercase = RealmConfig(num_block_records=self.num_block_records )
return config
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
__lowercase = Dataset.from_dict(
{
"""id""": ["""0""", """1"""],
"""question""": ["""foo""", """bar"""],
"""answers""": [["""Foo""", """Bar"""], ["""Bar"""]],
} )
return dataset
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = np.array(
[
B"""This is the first record""",
B"""This is the second record""",
B"""This is the third record""",
B"""This is the fourth record""",
B"""This is the fifth record""",
B"""This is a longer longer longer record""",
] , dtype=A_ , )
return block_records
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def SCREAMING_SNAKE_CASE_ ( self : Dict ):
'''simple docstring'''
__lowercase = self.get_config()
__lowercase = self.get_dummy_retriever()
__lowercase = retriever.tokenizer
__lowercase = np.array([0, 3] , dtype="""long""" )
__lowercase = tokenizer(["""Test question"""] ).input_ids
__lowercase = tokenizer(
["""the fourth"""] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
__lowercase = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors="""np""" )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(len(A_ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 1_0) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 1_0) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 1_0) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """first""", """record""", """[SEP]"""] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["""[CLS]""", """test""", """question""", """[SEP]""", """this""", """is""", """the""", """fourth""", """record""", """[SEP]"""] , )
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
__lowercase = self.get_config()
__lowercase = self.get_dummy_retriever()
__lowercase = retriever.tokenizer
__lowercase = np.array([0, 3, 5] , dtype="""long""" )
__lowercase = tokenizer(["""Test question"""] ).input_ids
__lowercase = tokenizer(
["""the fourth""", """longer longer"""] , add_special_tokens=A_ , return_token_type_ids=A_ , return_attention_mask=A_ , ).input_ids
__lowercase = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase = retriever(
A_ , A_ , answer_ids=A_ , max_length=A_ , return_tensors="""np""" )
self.assertEqual([False, True, True] , A_ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , A_ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
__lowercase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
# Test local path
__lowercase = retriever.from_pretrained(os.path.join(self.tmpdirname , """realm_block_records""" ) )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
# Test mocked remote path
with patch("""transformers.models.realm.retrieval_realm.hf_hub_download""" ) as mock_hf_hub_download:
__lowercase = os.path.join(
os.path.join(self.tmpdirname , """realm_block_records""" ) , _REALM_BLOCK_RECORDS_FILENAME )
__lowercase = RealmRetriever.from_pretrained("""google/realm-cc-news-pretrained-openqa""" )
self.assertEqual(retriever.block_records[0] , B"""This is the first record""" )
| 442
| 0
|
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowercase_ ( _lowercase ) -> Tuple:
'''simple docstring'''
lowerCamelCase_ : List[Any] = os.path.join(args.tf_model_dir , '''parameters.json''' )
lowerCamelCase_ : Optional[int] = json.loads(open(_lowercase ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('''.pt''' ):
lowerCamelCase_ : int = args.output + '''.pt'''
lowerCamelCase_ : Optional[Any] = OrderedDict()
with tf.device('''/CPU:0''' ):
lowerCamelCase_ : List[Any] = tf.train.load_checkpoint(args.tf_model_dir )
lowerCamelCase_ : List[str] = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCamelCase_ : List[Any] = reader.get_tensor(_lowercase ).astype(np.floataa )
if key_name.endswith('''/adam_m''' ) or key_name.endswith('''/adam_v''' ):
continue
if key_name.startswith('''pasts/''' ):
if key_name.startswith('''pasts/mlp''' ):
lowerCamelCase_ : Any = int(key_name[9] )
elif key_name.startswith('''pasts/out''' ):
lowerCamelCase_ : Dict = 8
                    lowerCamelCase_ : Union[str, Any] = '''model.sqout.%d.weight''' % (player * 2)  # feeds an nn.Sequential with Tanh, so two entries at a time
lowerCamelCase_ : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Tuple = torch.tensor(_lowercase )
elif key_name.startswith('''model/moe''' ):
lowerCamelCase_ : Union[str, Any] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/switch_gating/kernel''' ):
lowerCamelCase_ : Dict = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCamelCase_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Tuple = torch.tensor(_lowercase )
elif key_name.endswith('''/softmlp/kernel''' ):
lowerCamelCase_ : Union[str, Any] = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCamelCase_ : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Optional[int] = torch.tensor(_lowercase )
elif key_name.endswith('''/wo/kernel''' ) or key_name.endswith('''/wi/kernel''' ):
lowerCamelCase_ : Any = key_name[-9:-7]
for i in range(16 ):
lowerCamelCase_ : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCamelCase_ : Optional[int] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCamelCase_ : str = torch.tensor(_lowercase )
elif key_name.startswith('''model/mlp''' ):
lowerCamelCase_ : Tuple = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/p1/kernel''' ):
lowerCamelCase_ : Union[str, Any] = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCamelCase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Union[str, Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/p1/bias''' ):
lowerCamelCase_ : int = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCamelCase_ : Optional[int] = vnp.copy() # same because it is one dimensional
lowerCamelCase_ : int = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/kernel''' ):
lowerCamelCase_ : List[str] = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCamelCase_ : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Union[str, Any] = torch.tensor(_lowercase )
elif key_name.endswith('''/p2/bias''' ):
lowerCamelCase_ : Optional[Any] = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCamelCase_ : List[str] = vnp.copy() # same because it is one dimensional
lowerCamelCase_ : Optional[int] = torch.tensor(_lowercase )
elif key_name.startswith('''model/ln''' ):
lowerCamelCase_ : Any = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase_ : int = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCamelCase_ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase_ : str = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase_ : Union[str, Any] = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCamelCase_ : int = vnp.copy() # same because it is one dimensional
lowerCamelCase_ : Dict = torch.tensor(_lowercase )
elif key_name.startswith('''model/att''' ):
lowerCamelCase_ : List[Any] = int(key_name[9:].split('''/''' )[0] )
if key_name.endswith('''/qkv/kernel''' ):
lowerCamelCase_ : Optional[int] = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCamelCase_ : Any = state[:, 0, :, :]
lowerCamelCase_ : Any = state[:, 1, :, :]
lowerCamelCase_ : Optional[Any] = state[:, 2, :, :]
lowerCamelCase_ : Union[str, Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : int = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Optional[int] = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Tuple = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCamelCase_ : List[str] = torch.tensor(_lowercase )
lowerCamelCase_ : Dict = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCamelCase_ : List[Any] = torch.tensor(_lowercase )
lowerCamelCase_ : Optional[Any] = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCamelCase_ : str = torch.tensor(_lowercase )
elif key_name.endswith('''/o/kernel''' ):
lowerCamelCase_ : List[Any] = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCamelCase_ : List[Any] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : List[str] = torch.tensor(_lowercase )
elif key_name.startswith('''model/an''' ):
lowerCamelCase_ : List[str] = int(key_name[8:].split('''/''' )[0] )
if key_name.endswith('''/b''' ):
lowerCamelCase_ : Dict = '''model.blocks.%d.self_attn.norm.bias''' % player
lowerCamelCase_ : Optional[Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase_ : Tuple = torch.tensor(_lowercase )
elif key_name.endswith('''/g''' ):
lowerCamelCase_ : Any = '''model.blocks.%d.self_attn.norm.weight''' % player
lowerCamelCase_ : Dict = vnp.copy() # same because it is one dimensional
lowerCamelCase_ : Optional[Any] = torch.tensor(_lowercase )
elif (
key_name.startswith('''model/wte''' )
or key_name.startswith('''model/wpe''' )
or key_name.startswith('''model/ete''' )
):
lowerCamelCase_ : Union[str, Any] = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCamelCase_ : Dict = '''model.%s.weight''' % nlayer
lowerCamelCase_ : int = vnp.copy() # same in embedded
lowerCamelCase_ : Any = torch.tensor(_lowercase )
if key_name.startswith('''model/wte''' ):
lowerCamelCase_ : Tuple = '''lm_head.weight'''
lowerCamelCase_ : List[Any] = vnp.copy() # same in embedded
lowerCamelCase_ : List[Any] = torch.tensor(_lowercase )
elif key_name.startswith('''model/wob''' ):
lowerCamelCase_ : Dict = '''final_logits_bias'''
lowerCamelCase_ : Union[str, Any] = vnp.copy() # same in embedded
lowerCamelCase_ : Tuple = state.reshape((1, -1) )
lowerCamelCase_ : List[Any] = torch.tensor(_lowercase )
elif key_name == "model/dense/kernel":
lowerCamelCase_ : Union[str, Any] = '''model.last_project.weight'''
lowerCamelCase_ : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCamelCase_ : Any = torch.tensor(_lowercase )
elif key_name == "model/dense_1/bias":
lowerCamelCase_ : Union[str, Any] = '''model.last_project.bias'''
lowerCamelCase_ : Optional[Any] = vnp.copy() # same because it is one dimensional
lowerCamelCase_ : Union[str, Any] = torch.tensor(_lowercase )
torch.save(_lowercase , args.output )
if __name__ == "__main__":
__lowercase : int = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
__lowercase : Dict = parser.parse_args()
convert_tf_gptsan_to_pt(args)
| 422
|
'''simple docstring'''
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowercase_ ( _lowercase , _lowercase=False ) -> Dict:
'''simple docstring'''
lowerCamelCase_ : Tuple = OmegaConf.load(_lowercase )
if display:
print(yaml.dump(OmegaConf.to_container(_lowercase ) ) )
return config
def lowercase_ ( _lowercase , _lowercase=None , _lowercase=None ) -> Optional[int]:
'''simple docstring'''
if conf_path is None:
lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.yaml'''
lowerCamelCase_ : Dict = load_config(_lowercase , display=_lowercase )
lowerCamelCase_ : List[str] = VQModel(**config.model.params )
if ckpt_path is None:
lowerCamelCase_ : int = '''./model_checkpoints/vqgan_only.pt'''
lowerCamelCase_ : Union[str, Any] = torch.load(_lowercase , map_location=_lowercase )
if ".ckpt" in ckpt_path:
lowerCamelCase_ : str = sd['''state_dict''']
model.load_state_dict(_lowercase , strict=_lowercase )
model.to(_lowercase )
del sd
return model
def lowercase_ ( _lowercase , _lowercase ) -> List[str]:
'''simple docstring'''
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ : Any = model.encode(_lowercase )
print(F"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" )
lowerCamelCase_ : Any = model.decode(_lowercase )
return xrec
def lowercase_ ( _lowercase , _lowercase=False ) -> Any:
'''simple docstring'''
lowerCamelCase_, lowerCamelCase_ : Any = string.rsplit('''.''' , 1 )
if reload:
lowerCamelCase_ : int = importlib.import_module(_lowercase )
importlib.reload(_lowercase )
return getattr(importlib.import_module(_lowercase , package=_lowercase ) , cls )
def lowercase_ ( _lowercase ) -> List[str]:
'''simple docstring'''
if "target" not in config:
raise KeyError('''Expected key `target` to instantiate.''' )
return get_obj_from_str(config['''target'''] )(**config.get('''params''' , {} ) )
def lowercase_ ( _lowercase , _lowercase , _lowercase=True , _lowercase=True ) -> Any:
'''simple docstring'''
lowerCamelCase_ : int = instantiate_from_config(_lowercase )
if sd is not None:
model.load_state_dict(_lowercase )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowercase_ ( _lowercase , _lowercase , _lowercase , _lowercase ) -> Tuple:
'''simple docstring'''
if ckpt:
lowerCamelCase_ : List[Any] = torch.load(_lowercase , map_location='''cpu''' )
lowerCamelCase_ : int = pl_sd['''global_step''']
print(F"""loaded model from global step {global_step}.""" )
else:
lowerCamelCase_ : Optional[int] = {'''state_dict''': None}
lowerCamelCase_ : str = None
lowerCamelCase_ : Any = load_model_from_config(config.model , pl_sd['''state_dict'''] , gpu=_lowercase , eval_mode=_lowercase )['''model''']
return model, global_step
| 422
| 1
|
from __future__ import annotations
def fractional_knapsack(value: list[int], weight: list[int], capacity: int) -> tuple[float, list[float]]:
    index = list(range(len(value)))
    ratio = [v / w for v, w in zip(value, weight)]
    index.sort(key=lambda i: ratio[i], reverse=True)
    max_value: float = 0
    fractions: list[float] = [0] * len(value)
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break
    return max_value, fractions
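# A quick worked example with illustrative numbers: values [60, 100, 120],
# weights [10, 20, 30] and capacity 50 sort to ratio order 6 > 5 > 4, so the
# first two items are taken whole and 2/3 of the third, the classic optimum:
#   fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
#   -> (240.0, [1, 1, 0.6666666666666666])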
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase__ ( snake_case__ , unittest.TestCase ):
snake_case_ = TransfoXLTokenizer
snake_case_ = False
snake_case_ = False
def snake_case_ ( self ):
"""simple docstring"""
super().setUp()
UpperCAmelCase_: Any = [
"<unk>",
"[CLS]",
"[SEP]",
"want",
"unwanted",
"wa",
"un",
"running",
",",
"low",
"l",
]
UpperCAmelCase_: str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def snake_case_ ( self , **A__ ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = True
return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **A__ )
def snake_case_ ( self , A__ ):
"""simple docstring"""
UpperCAmelCase_: List[Any] = "<unk> UNwanted , running"
UpperCAmelCase_: List[str] = "<unk> unwanted, running"
return input_text, output_text
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Tuple = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=A__ )
UpperCAmelCase_: Optional[int] = tokenizer.tokenize("<unk> UNwanted , running" )
self.assertListEqual(A__ , ["<unk>", "unwanted", ",", "running"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A__ ) , [0, 4, 8, 7] )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Tuple = TransfoXLTokenizer(lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["hello", "!", "how", "are", "you", "?"] )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = TransfoXLTokenizer(lower_case=A__ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = TransfoXLTokenizer(lower_case=A__ )
UpperCAmelCase_: Optional[Any] = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?"
UpperCAmelCase_: Optional[int] = [
"Hello",
"(",
"bracket",
")",
"and",
"side",
"@-@",
"scrolled",
"[",
"and",
"]",
"Henry",
"'s",
"$",
"5",
"@,@",
"000",
"with",
"3",
"@.@",
"34",
"m",
".",
"What",
"'s",
"up",
"!",
"?",
]
self.assertListEqual(tokenizer.tokenize(A__ ) , A__ )
self.assertEqual(tokenizer.convert_tokens_to_string(A__ ) , A__ )
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = self.get_tokenizer()
UpperCAmelCase_: Optional[Any] = len(A__ )
tokenizer.add_tokens(["new1", "new2"] )
tokenizer.move_added_token("new1" , 1 )
# Check that moved token is not copied (duplicate)
self.assertEqual(len(A__ ) , original_len + 2 )
# Check that token is moved to specified id
self.assertEqual(tokenizer.encode("new1" ) , [1] )
self.assertEqual(tokenizer.decode([1] ) , "new1" )
| 306
| 0
|
"""simple docstring"""
from math import pi
def arc_length(angle: float, radius: float) -> float:
    '''Return the length of a circular arc spanning `angle` degrees at the given `radius`.'''
    return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10))
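# Worked example: arc_length(90, 10) = 2 * pi * 10 * (90 / 360) = 5 * pi, i.e. about 15.70796.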
| 646
|
'''simple docstring'''
from datetime import datetime as dt
import os
from github import Github
UpperCamelCase__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main() -> None:
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/transformers''')
    open_issues = repo.get_issues(state='''open''')
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='''closed''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
if __name__ == "__main__":
main()
| 75
| 0
|
'''simple docstring'''
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
abc1 = [0, 25, 50]
abc2 = [25, 50, 75]
young = fuzz.membership.trimf(X, abc1)
middle_aged = fuzz.membership.trimf(X, abc2)
# Compute the different operations using inbuilt functions.
one = np.ones(75)
zero = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1 - µA(x))
complement_a = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x), (1 - µB(x)))
difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) - (µA(x) * µB(x))]
alg_sum = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
alg_product = young * middle_aged
# 7. Bounded Sum = min[1, (µA(x) + µB(x))]
bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = max[0, (µA(x) - µB(x))]
bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
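# The two composition labels above are left unimplemented in this script; as a
# minimal NumPy sketch (illustrative only, not part of the original demo),
# max-min composition of fuzzy relations R (m x n) and S (n x p) is
# T[i, j] = max_k min(R[i, k], S[k, j]):
def _max_min_composition(r: np.ndarray, s: np.ndarray) -> np.ndarray:
    return np.max(np.minimum(r[:, :, None], s[None, :, :]), axis=1)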
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
| 417
|
'''simple docstring'''
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def lowercase ( lowerCAmelCase : BertModel , lowerCAmelCase : str , lowerCAmelCase : str):
"""simple docstring"""
_A : List[str] = ('''dense.weight''', '''attention.self.query''', '''attention.self.key''', '''attention.self.value''')
_A : int = (
('''layer.''', '''layer_'''),
('''word_embeddings.weight''', '''word_embeddings'''),
('''position_embeddings.weight''', '''position_embeddings'''),
('''token_type_embeddings.weight''', '''token_type_embeddings'''),
('''.''', '''/'''),
('''LayerNorm/weight''', '''LayerNorm/gamma'''),
('''LayerNorm/bias''', '''LayerNorm/beta'''),
('''weight''', '''kernel'''),
)
if not os.path.isdir(lowerCAmelCase):
os.makedirs(lowerCAmelCase)
_A : Any = model.state_dict()
def to_tf_var_name(lowerCAmelCase : str):
for patt, repl in iter(lowerCAmelCase):
_A : Dict = name.replace(lowerCAmelCase , lowerCAmelCase)
return f"""bert/{name}"""
def create_tf_var(lowerCAmelCase : np.ndarray , lowerCAmelCase : str , lowerCAmelCase : tf.Session):
_A : Tuple = tf.dtypes.as_dtype(tensor.dtype)
_A : int = tf.get_variable(dtype=lowerCAmelCase , shape=tensor.shape , name=lowerCAmelCase , initializer=tf.zeros_initializer())
session.run(tf.variables_initializer([tf_var]))
session.run(lowerCAmelCase)
return tf_var
tf.reset_default_graph()
with tf.Session() as session:
for var_name in state_dict:
_A : Dict = to_tf_var_name(lowerCAmelCase)
_A : Any = state_dict[var_name].numpy()
if any(x in var_name for x in tensors_to_transpose):
_A : str = torch_tensor.T
_A : Any = create_tf_var(tensor=lowerCAmelCase , name=lowerCAmelCase , session=lowerCAmelCase)
tf.keras.backend.set_value(lowerCAmelCase , lowerCAmelCase)
_A : List[Any] = session.run(lowerCAmelCase)
print(f"""Successfully created {tf_name}: {np.allclose(lowerCAmelCase , lowerCAmelCase)}""")
_A : Tuple = tf.train.Saver(tf.trainable_variables())
saver.save(lowerCAmelCase , os.path.join(lowerCAmelCase , model_name.replace('''-''' , '''_''') + '''.ckpt'''))
def lowercase ( lowerCAmelCase : Union[str, Any]=None):
"""simple docstring"""
_A : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--model_name''' , type=lowerCAmelCase , required=lowerCAmelCase , help='''model name e.g. bert-base-uncased''')
parser.add_argument(
'''--cache_dir''' , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , help='''Directory containing pytorch model''')
parser.add_argument('''--pytorch_model_path''' , type=lowerCAmelCase , required=lowerCAmelCase , help='''/path/to/<pytorch-model-name>.bin''')
parser.add_argument('''--tf_cache_dir''' , type=lowerCAmelCase , required=lowerCAmelCase , help='''Directory in which to save tensorflow model''')
_A : Any = parser.parse_args(lowerCAmelCase)
_A : Optional[int] = BertModel.from_pretrained(
pretrained_model_name_or_path=args.model_name , state_dict=torch.load(args.pytorch_model_path) , cache_dir=args.cache_dir , )
convert_pytorch_checkpoint_to_tf(model=lowerCAmelCase , ckpt_dir=args.tf_cache_dir , model_name=args.model_name)
if __name__ == "__main__":
main()
| 417
| 1
|
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
A_ : Tuple = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available() -> bool:
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv('SM_HP_MP_PARAMETERS', '{}')
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False
    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv('SM_FRAMEWORK_PARAMS', '{}')
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get('sagemaker_mpi_enabled', False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec('smdistributed') is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class _lowerCAmelCase( UpperCAmelCase_ ):
"""simple docstring"""
a : str =field(
default='''''' , metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} , )
def _a ( self ):
super().__post_init__()
warnings.warn(
'`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use '
'`TrainingArguments` instead.' , _lowerCamelCase , )
@cached_property
def _a ( self ):
logger.info('PyTorch: setting up devices' )
if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
logger.warning(
'torch.distributed process group is initialized, but local_rank == -1. '
'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch' )
if self.no_cuda:
UpperCamelCase_: str = torch.device('cpu' )
UpperCamelCase_: Optional[Any] = 0
elif is_sagemaker_model_parallel_available():
UpperCamelCase_: Optional[int] = smp.local_rank()
UpperCamelCase_: Any = torch.device('cuda' , _lowerCamelCase )
UpperCamelCase_: int = 1
elif is_sagemaker_dp_enabled():
import smdistributed.dataparallel.torch.torch_smddp # noqa: F401
torch.distributed.init_process_group(backend='smddp' , timeout=self.ddp_timeout_delta )
UpperCamelCase_: Optional[int] = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK' ) )
UpperCamelCase_: Dict = torch.device('cuda' , self.local_rank )
UpperCamelCase_: Union[str, Any] = 1
elif self.local_rank == -1:
# if n_gpu is > 1 we'll use nn.DataParallel.
# If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
# Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
# trigger an error that a device index is missing. Index 0 takes into account the
# GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
# will use the first GPU in that env, i.e. GPU#1
UpperCamelCase_: Union[str, Any] = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu' )
# Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
# the default value.
UpperCamelCase_: Any = torch.cuda.device_count()
else:
# Here, we'll use torch.distributed.
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs
if not torch.distributed.is_initialized():
torch.distributed.init_process_group(backend='nccl' , timeout=self.ddp_timeout_delta )
UpperCamelCase_: Optional[Any] = torch.device('cuda' , self.local_rank )
UpperCamelCase_: Optional[int] = 1
if device.type == "cuda":
torch.cuda.set_device(_lowerCamelCase )
return device
@property
def _a ( self ):
if is_sagemaker_model_parallel_available():
return smp.dp_size()
return super().world_size
@property
def _a ( self ):
return not is_sagemaker_model_parallel_available()
@property
def _a ( self ):
return False
| 57
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
A_ : List[str] = {
'configuration_roc_bert': ['ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoCBertConfig'],
'tokenization_roc_bert': ['RoCBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : Union[str, Any] = [
'ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoCBertForCausalLM',
'RoCBertForMaskedLM',
'RoCBertForMultipleChoice',
'RoCBertForPreTraining',
'RoCBertForQuestionAnswering',
'RoCBertForSequenceClassification',
'RoCBertForTokenClassification',
'RoCBertLayer',
'RoCBertModel',
'RoCBertPreTrainedModel',
'load_tf_weights_in_roc_bert',
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
A_ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 57
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase):
_UpperCamelCase:Any = StableDiffusionInpaintPipeline
_UpperCamelCase:int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
_UpperCamelCase:int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCamelCase:Union[str, Any] = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
_UpperCamelCase:Optional[Any] = frozenset([])
def _snake_case ( self )-> List[Any]:
torch.manual_seed(0 )
lowerCamelCase_ =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =PNDMScheduler(skip_prk_steps=_SCREAMING_SNAKE_CASE )
torch.manual_seed(0 )
lowerCamelCase_ =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase_ =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
lowerCamelCase_ =CLIPTextModel(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
lowerCamelCase_ ={
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 )-> str:
# TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
lowerCamelCase_ =floats_tensor((1, 3, 32, 32) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCamelCase_ =Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((64, 64) )
lowerCamelCase_ =Image.fromarray(np.uinta(image + 4 ) ).convert("""RGB""" ).resize((64, 64) )
if str(_SCREAMING_SNAKE_CASE ).startswith("""mps""" ):
lowerCamelCase_ =torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
lowerCamelCase_ =torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ={
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": init_image,
"""mask_image""": mask_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _snake_case ( self )-> Optional[Any]:
lowerCamelCase_ ="""cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase_ =self.get_dummy_components()
lowerCamelCase_ =StableDiffusionInpaintPipeline(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =sd_pipe.to(_SCREAMING_SNAKE_CASE )
sd_pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =sd_pipe(**_SCREAMING_SNAKE_CASE ).images
lowerCamelCase_ =image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCamelCase_ =np.array([0.4_7_2_7, 0.5_7_3_5, 0.3_9_4_1, 0.5_4_4_6, 0.5_9_2_6, 0.4_3_9_4, 0.5_0_6_2, 0.4_6_5_4, 0.4_4_7_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self )-> Tuple:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
@slow
@require_torch_gpu
class StableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png")
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png")
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        scheduler = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=scheduler, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
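# Minimal usage sketch of the pipeline exercised above (our addition; it mirrors the
# slow tests rather than adding new behaviour, and the prompt/device are illustrative):
# pipe = StableDiffusionInpaintPipeline.from_pretrained(
#     "stabilityai/stable-diffusion-2-inpainting", safety_checker=None
# )
# pipe.to("cuda")
# result = pipe(prompt="Face of a yellow cat, high resolution, sitting on a park bench",
#               image=init_image, mask_image=mask_image,
#               generator=torch.manual_seed(0), output_type="np")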
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__A : Any = logging.get_logger(__name__)
class LevitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD, **kwargs) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
    def resize(self, image, size, resample=PILImageResampling.BICUBIC, data_format=None, **kwargs) -> np.ndarray:
        size_dict = get_size_dict(size, default_to_square=False)
        # size_dict is a dict with either keys "height" and "width" or "shortest_edge"
        if "shortest_edge" in size:
            shortest_edge = int((256 / 224) * size["shortest_edge"])
            output_size = get_resize_output_image_size(image, size=shortest_edge, default_to_square=False)
            size_dict = {"height": output_size[0], "width": output_size[1]}
        if "height" not in size_dict or "width" not in size_dict:
            raise ValueError(
                f"Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}")
        return resize(
            image, size=(size_dict["height"], size_dict["width"]), resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image, size, data_format=None, **kwargs) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dict must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images, do_resize=None, size=None, resample=None, do_center_crop=None, crop_size=None, do_rescale=None, rescale_factor=None, do_normalize=None, image_mean=None, image_std=None, return_tensors=None, data_format=ChannelDimension.FIRST, **kwargs) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size_dict = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image, size_dict, resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image, crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image, rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image, image_mean, image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
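# Standalone sketch of the shortest-edge resize rule above (our addition, not part of
# the processor): the shorter image side is first scaled to int((256 / 224) * shortest_edge)
# before the fixed-size center crop.
def _shortest_edge_target(height, width, shortest_edge=224):
    scale = int((256 / 224) * shortest_edge) / min(height, width)
    return (round(height * scale), round(width * scale))

# e.g. a 480x640 input is resized to roughly (256, 341) for shortest_edge=224
assert _shortest_edge_target(480, 640) == (256, 341)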
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
COMMUNITY_PIPELINES_URL = (
"https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda v: version.Version(v))
def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return
    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()
def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))
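# Quick illustration of the relative-import patterns matched above (our addition):
# both `import .xxx` and `from .xxx import yyy` resolve to their bare module names.
# >>> src = "import .pipeline_utils\nfrom .schedulers import DDIMScheduler\n"
# >>> re.findall(r"^\s*import\s+\.(\S+)\s*$", src, flags=re.MULTILINE)
# ['pipeline_utils']
# >>> re.findall(r"^\s*from\s+\.(\S+)\s+import", src, flags=re.MULTILINE)
# ['schedulers']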
def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []
    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))
        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]
        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)
    return all_relative_imports
def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()
    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]
    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)
    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`")
    return get_relative_imports(filename)
def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)
    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)
def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline
    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}.")
            pipeline_class = cls
    return pipeline_class
def _UpperCamelCase ( A , A , A = None , A = False , A = False , A = None , A = None , A = None , A = False , ):
UpperCamelCase_ =str(A )
UpperCamelCase_ =os.path.join(A , A )
if os.path.isfile(A ):
UpperCamelCase_ =module_file_or_url
UpperCamelCase_ ="local"
elif pretrained_model_name_or_path.count("/" ) == 0:
UpperCamelCase_ =get_diffusers_versions()
# cut ".dev0"
UpperCamelCase_ ="v" + ".".join(__version__.split("." )[:3] )
# retrieve github version that matches
if revision is None:
UpperCamelCase_ =latest_version if latest_version[1:] in available_versions else "main"
logger.info(f"""Defaulting to latest_version: {revision}.""" )
elif revision in available_versions:
UpperCamelCase_ =f"""v{revision}"""
elif revision == "main":
UpperCamelCase_ =revision
else:
raise ValueError(
f"""`custom_revision`: {revision} does not exist. Please make sure to choose one of"""
f""" {", ".join(available_versions + ["main"] )}.""" )
# community pipeline on GitHub
UpperCamelCase_ =COMMUNITY_PIPELINES_URL.format(revision=A , pipeline=A )
try:
UpperCamelCase_ =cached_download(
A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , )
UpperCamelCase_ ="git"
UpperCamelCase_ =pretrained_model_name_or_path + ".py"
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
else:
try:
# Load from URL or cache if already cached
UpperCamelCase_ =hf_hub_download(
A , A , cache_dir=A , force_download=A , proxies=A , resume_download=A , local_files_only=A , use_auth_token=A , )
UpperCamelCase_ =os.path.join("local" , "--".join(pretrained_model_name_or_path.split("/" ) ) )
except EnvironmentError:
logger.error(f"""Could not locate the {module_file} inside {pretrained_model_name_or_path}.""" )
raise
# Check we have all the requirements in our environment
UpperCamelCase_ =check_imports(A )
# Now we move the module inside our cached dynamic modules.
UpperCamelCase_ =DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(A )
UpperCamelCase_ =Path(A ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(A , submodule_path / module_file )
for module_needed in modules_needed:
UpperCamelCase_ =f"""{module_needed}.py"""
shutil.copy(os.path.join(A , A ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(A , A ):
UpperCamelCase_ =use_auth_token
elif use_auth_token is True:
UpperCamelCase_ =HfFolder.get_token()
else:
UpperCamelCase_ =None
UpperCamelCase_ =model_info(A , revision=A , token=A ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
UpperCamelCase_ =submodule_path / commit_hash
UpperCamelCase_ =full_submodule + os.path.sep + commit_hash
create_dynamic_module(A )
if not (submodule_path / module_file).exists():
shutil.copy(A , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
A , f"""{module_needed}.py""" , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
return os.path.join(A , A )
def _UpperCamelCase ( A , A , A = None , A = None , A = False , A = False , A = None , A = None , A = None , A = False , **A , ):
UpperCamelCase_ =get_cached_module_file(
A , A , cache_dir=A , force_download=A , resume_download=A , proxies=A , use_auth_token=A , revision=A , local_files_only=A , )
return get_class_in_module(A , final_module.replace(".py" , "" ) )
"""simple docstring"""
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def column_reshape(input_array: np.ndarray) -> np.ndarray:
    return input_array.reshape((input_array.size, 1))
def covariance_within_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        data_mean = data.mean(1)
        # Centralize the data of class i
        centered_data = data - column_reshape(data_mean)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += np.dot(centered_data, centered_data.T)
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = np.dot(centered_data, centered_data.T)
    return covariance_sum / features.shape[1]
def covariance_between_classes(features: np.ndarray, labels: np.ndarray, classes: int) -> np.ndarray:
    general_data_mean = features.mean(1)
    covariance_sum = np.nan
    for i in range(classes):
        data = features[:, labels == i]
        device_data = data.shape[1]
        data_mean = data.mean(1)
        if i > 0:
            # If covariance_sum is not None
            covariance_sum += device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
        else:
            # If covariance_sum is np.nan (i.e. first loop)
            covariance_sum = device_data * np.dot(
                column_reshape(data_mean) - column_reshape(general_data_mean),
                (column_reshape(data_mean) - column_reshape(general_data_mean)).T,
            )
    return covariance_sum / features.shape[1]
def principal_component_analysis(features: np.ndarray, dimensions: int) -> np.ndarray:
    # Check if the features have been loaded
    if features.any():
        data_mean = features.mean(1)
        # Center the dataset
        centered_data = features - np.reshape(data_mean, (data_mean.size, 1))
        covariance_matrix = np.dot(centered_data, centered_data.T) / features.shape[1]
        _, eigenvectors = np.linalg.eigh(covariance_matrix)
        # Take all the columns in the reverse order (-1), and then takes only the first
        filtered_eigenvectors = eigenvectors[:, ::-1][:, 0:dimensions]
        # Project the database on the new space
        projected_data = np.dot(filtered_eigenvectors.T, features)
        logging.info("Principal Component Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def linear_discriminant_analysis(features: np.ndarray, labels: np.ndarray, classes: int, dimensions: int) -> np.ndarray:
    assert classes > dimensions
    # Check if features have been already loaded
    if features.any():
        _, eigenvectors = eigh(
            covariance_between_classes(features, labels, classes),
            covariance_within_classes(features, labels, classes),
        )
        filtered_eigenvectors = eigenvectors[:, ::-1][:, :dimensions]
        svd_matrix, _, _ = np.linalg.svd(filtered_eigenvectors)
        filtered_svd_matrix = svd_matrix[:, 0:dimensions]
        projected_data = np.dot(filtered_svd_matrix.T, features)
        logging.info("Linear Discriminant Analysis computed")
        return projected_data
    else:
        logging.basicConfig(level=logging.ERROR, format="%(message)s", force=True)
        logging.error("Dataset empty")
        raise AssertionError
def test_linear_discriminant_analysis() -> None:
    # Create dummy dataset with 2 classes and 3 features
    features = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
    labels = np.array([0, 0, 0, 1, 1])
    classes = 2
    dimensions = 2
    # Assert that the function raises an AssertionError if dimensions > classes
    with pytest.raises(AssertionError) as error_info:
        projected_data = linear_discriminant_analysis(features, labels, classes, dimensions)
        if isinstance(projected_data, np.ndarray):
            raise AssertionError(
                "Did not raise AssertionError for dimensions > classes")
    assert error_info.type is AssertionError
def test_principal_component_analysis() -> None:
    features = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    dimensions = 2
    expected_output = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]])
    with pytest.raises(AssertionError) as error_info:
        output = principal_component_analysis(features, dimensions)
        if not np.allclose(expected_output, output):
            raise AssertionError
    assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
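# Worked example (our addition, mirroring the fixture in test_principal_component_analysis):
# project a toy 3-feature dataset onto its top two principal components.
if __name__ == "__main__":
    demo_features = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
    print(principal_component_analysis(demo_features, dimensions=2))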
'''simple docstring'''
from __future__ import annotations
def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b
def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m
if __name__ == "__main__":
    from doctest import testmod
    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
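# Worked example (our addition): the smallest non-negative x with x % 5 == 2 and
# x % 7 == 4 is 32, and both solvers above agree on it:
# >>> chinese_remainder_theorem(5, 2, 7, 4)
# 32
# >>> chinese_remainder_theorem2(5, 2, 7, 4)
# 32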
'''simple docstring'''
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
    Wav2Vec2FeatureExtractor,
logging,
)
logging.set_verbosity_info()
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    model = UniSpeechSatForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = UniSpeechSatConfig.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False)
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.'''
)
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''')
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''')
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
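# Example invocation (our addition; the script filename and paths are placeholders,
# while the flags come from the argparse definitions above):
# python convert_unispeech_sat_s3prl_checkpoint.py \
#     --base_model_name microsoft/unispeech-sat-base \
#     --config_path ./config.json \
#     --checkpoint_path ./s3prl_checkpoint.pt \
#     --model_dump_path ./converted_model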
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class _snake_case :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=9 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=37 , SCREAMING_SNAKE_CASE_=8 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.0_0_2 , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ):
'''simple docstring'''
lowercase__ : Optional[int] = parent
lowercase__ : Any = batch_size
lowercase__ : Dict = encoder_seq_length
lowercase__ : int = decoder_seq_length
# For common tests
lowercase__ : Tuple = self.decoder_seq_length
lowercase__ : List[Any] = is_training
lowercase__ : Dict = use_attention_mask
lowercase__ : Any = use_labels
lowercase__ : Any = vocab_size
lowercase__ : int = hidden_size
lowercase__ : List[str] = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : str = d_ff
lowercase__ : Tuple = relative_attention_num_buckets
lowercase__ : Optional[Any] = dropout_rate
lowercase__ : List[str] = initializer_factor
lowercase__ : Dict = eos_token_id
lowercase__ : List[Any] = pad_token_id
lowercase__ : Optional[int] = decoder_start_token_id
lowercase__ : Any = None
lowercase__ : str = decoder_layers
def lowercase__ ( self):
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""")
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size)
lowercase__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size)
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
lowercase__ : str = input_ids.clamp(self.pad_token_id + 1)
lowercase__ : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1)
lowercase__ : List[str] = self.get_config()
lowercase__ : List[Any] = config.num_attention_heads
lowercase__ : Dict = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
return config, input_dict
def lowercase__ ( self):
'''simple docstring'''
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
def lowercase__ ( self):
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase__ ( self):
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : List[Any] = UMTaModel(config=SCREAMING_SNAKE_CASE_)
model.to(SCREAMING_SNAKE_CASE_)
model.eval()
lowercase__ : Union[str, Any] = model(
input_ids=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , decoder_attention_mask=SCREAMING_SNAKE_CASE_ , )
lowercase__ : int = model(input_ids=SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_)
lowercase__ : int = result.last_hidden_state
lowercase__ : Optional[int] = result.past_key_values
lowercase__ : Union[str, Any] = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(SCREAMING_SNAKE_CASE_) , config.num_layers)
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0]) , 4)
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Optional[Any] = UMTaModel(config=SCREAMING_SNAKE_CASE_).get_decoder().to(SCREAMING_SNAKE_CASE_).eval()
# first forward pass
lowercase__ : Any = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = model(SCREAMING_SNAKE_CASE_)
lowercase__ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , use_cache=SCREAMING_SNAKE_CASE_)
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_) == len(SCREAMING_SNAKE_CASE_))
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE_) == len(SCREAMING_SNAKE_CASE_) + 1)
lowercase__ , lowercase__ : Tuple = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowercase__ : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size)
# append to next input_ids and
lowercase__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1)
lowercase__ : Any = model(SCREAMING_SNAKE_CASE_)["""last_hidden_state"""]
lowercase__ : List[str] = model(SCREAMING_SNAKE_CASE_ , past_key_values=SCREAMING_SNAKE_CASE_)["""last_hidden_state"""]
# select random slice
lowercase__ : List[str] = ids_tensor((1,) , output_from_past.shape[-1]).item()
lowercase__ : Any = output_from_no_past[:, -1, random_slice_idx].detach()
lowercase__ : Union[str, Any] = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3))
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
'''simple docstring'''
lowercase__ : Dict = UMTaModel(config=SCREAMING_SNAKE_CASE_).to(SCREAMING_SNAKE_CASE_).half().eval()
lowercase__ : str = model(**SCREAMING_SNAKE_CASE_)["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE_).any().item())
@require_torch
class _snake_case ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
__lowerCAmelCase : Any = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
__lowerCAmelCase : str = (UMTaForConditionalGeneration,) if is_torch_available() else ()
__lowerCAmelCase : Dict = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
__lowerCAmelCase : Any = True
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
__lowerCAmelCase : Tuple = [0.8, 0.9]
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[str] = UMTaModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""")
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
lowercase__ : str = UMTaModel(config_and_inputs[0]).to(SCREAMING_SNAKE_CASE_)
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'{tmpdirname}/t5_test.onnx' , export_params=SCREAMING_SNAKE_CASE_ , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""")
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE_)
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : int = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
lowercase__ : int = config_and_inputs[0]
lowercase__ : Tuple = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE_).eval()
model.to(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_),
}
for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE_ , head_masking.items()):
lowercase__ : str = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
lowercase__ : Optional[int] = torch.ones(
config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE_ , return_dict_in_generate=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
lowercase__ : int = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights]) , 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""")
def lowercase__ ( self):
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class _snake_case ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""")
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Tuple = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=SCREAMING_SNAKE_CASE_).to(SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[int] = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=SCREAMING_SNAKE_CASE_ , legacy=SCREAMING_SNAKE_CASE_)
lowercase__ : Dict = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
lowercase__ : Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , padding=SCREAMING_SNAKE_CASE_).input_ids
# fmt: off
        lowercase__ : Dict = torch.tensor(
            [
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ])
# fmt: on
torch.testing.assert_allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
lowercase__ : Optional[Any] = model.generate(input_ids.to(SCREAMING_SNAKE_CASE_))
lowercase__ : List[Any] = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
lowercase__ : List[str] = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_)
self.assertEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
import unittest
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
if is_torch_available():
import torch
from transformers import AutoModelForImageClassification
if is_vision_available():
from transformers import AutoImageProcessor
@require_torch
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
@slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model = AutoModelForImageClassification.from_pretrained('microsoft/dit-base-finetuned-rvlcdip')
        model.to(torch_device)
        from datasets import load_dataset
        dataset = load_dataset('nielsr/rvlcdip-demo')
        image = dataset['train'][0]['image'].convert('RGB')
        inputs = image_processor(image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347], device=torch_device, dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=" ")
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=" ")
            else:
                print(triangle[row_idx][col_idx], end="")
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("The input value of 'num_rows' should be 'int'")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            "The input value of 'num_rows' should be greater than or equal to 0")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
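# Sanity example (our addition): both generators agree on the first rows, e.g.
# generate_pascal_triangle(4)           == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]
# generate_pascal_triangle_optimized(4) == [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]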
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f"{call:38} -- {timing:.4f} seconds")
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned.")
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)
def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions.")
        raise
    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model.")
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state)
    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()
    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__a = flax_key_tuple.split('''.''' )
if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
__a = flax_key_tuple_array[:-1] + ['''weight''']
__a = jnp.transpose(_A , (3, 2, 0, 1) )
elif flax_key_tuple_array[-1] == "kernel":
__a = flax_key_tuple_array[:-1] + ['''weight''']
__a = flax_tensor.T
elif flax_key_tuple_array[-1] == "scale":
__a = flax_key_tuple_array[:-1] + ['''weight''']
if "time_embedding" not in flax_key_tuple_array:
for i, flax_key_tuple_string in enumerate(_A ):
__a = (
flax_key_tuple_string.replace('''_0''' , '''.0''' )
.replace('''_1''' , '''.1''' )
.replace('''_2''' , '''.2''' )
.replace('''_3''' , '''.3''' )
.replace('''_4''' , '''.4''' )
.replace('''_5''' , '''.5''' )
.replace('''_6''' , '''.6''' )
.replace('''_7''' , '''.7''' )
.replace('''_8''' , '''.8''' )
.replace('''_9''' , '''.9''' )
)
__a = '''.'''.join(_A )
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
__a = np.asarray(_A ) if not isinstance(_A , np.ndarray ) else flax_tensor
__a = torch.from_numpy(_A )
# remove from missing keys
missing_keys.remove(_A )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_A )
pt_model.load_state_dict(_A )
# re-transform missing_keys to list
__a = list(_A )
if len(_A ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
if len(_A ) > 0:
logger.warning(
f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
return pt_model
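if __name__ == "__main__":
    # Shape sanity check (our addition): a Flax conv kernel stored HWIO as
    # (H, W, C_in, C_out) becomes a PyTorch OIHW weight via the (3, 2, 0, 1)
    # transpose applied in load_flax_weights_in_pytorch_model above.
    assert np.transpose(np.zeros((3, 3, 16, 32)), (3, 2, 0, 1)).shape == (32, 16, 3, 3)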
import random
class Onepad:
    @staticmethod
    def encrypt(text: str) -> tuple[list[int], list[int]]:
        """Encrypt text using pseudo-random numbers."""
        plain = [ord(i) for i in text]
        cipher = []
        key = []
        for i in plain:
            k = random.randint(1, 300)
            c = (i + k) * k
            cipher.append(c)
            key.append(k)
        return cipher, key
    @staticmethod
    def decrypt(cipher: list[int], key: list[int]) -> str:
        """Decrypt text using pseudo-random numbers."""
        plain = []
        for i in range(len(key)):
            p = int((cipher[i] - (key[i]) ** 2) / key[i])
            plain.append(chr(p))
        return "".join(plain)
if __name__ == "__main__":
    c, k = Onepad().encrypt('Hello')
    print(c, k)
    print(Onepad().decrypt(c, k))
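# Round-trip sanity (our addition): decrypt inverts encrypt since
# c = (p + k) * k  implies  (c - k**2) / k = (p*k + k**2 - k**2) / k = p.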
"""simple docstring"""
def exchange_sort(numbers: list[int]) -> list[int]:
    """Exchange-sort the list in place and return it."""
    n = len(numbers)
    for i in range(n):
        for j in range(i + 1, n):
            if numbers[j] < numbers[i]:
                numbers[i], numbers[j] = numbers[j], numbers[i]
    return numbers
if __name__ == "__main__":
__UpperCAmelCase = input('Enter numbers separated by a comma:\n').strip()
__UpperCAmelCase = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
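# Note (our addition): exchange sort always performs O(n^2) comparisons,
# e.g. exchange_sort([3, 1, 2]) == [1, 2, 3].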
"""simple docstring"""
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _SCREAMING_SNAKE_CASE :
def __init__( self , __A , __A=14 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.0_2 , __A=3 , __A=4 , __A=None , ) -> Any:
lowerCAmelCase_ :Optional[int] = parent
lowerCAmelCase_ :Dict = batch_size
lowerCAmelCase_ :Optional[int] = seq_length
lowerCAmelCase_ :List[Any] = is_training
lowerCAmelCase_ :Tuple = use_token_type_ids
lowerCAmelCase_ :List[str] = use_input_mask
lowerCAmelCase_ :Tuple = use_labels
lowerCAmelCase_ :List[Any] = use_mc_token_ids
lowerCAmelCase_ :List[Any] = vocab_size
lowerCAmelCase_ :Tuple = hidden_size
lowerCAmelCase_ :Optional[int] = num_hidden_layers
lowerCAmelCase_ :Optional[int] = num_attention_heads
lowerCAmelCase_ :Tuple = intermediate_size
lowerCAmelCase_ :List[str] = hidden_act
lowerCAmelCase_ :Union[str, Any] = hidden_dropout_prob
lowerCAmelCase_ :Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ :int = max_position_embeddings
lowerCAmelCase_ :str = type_vocab_size
lowerCAmelCase_ :Dict = type_sequence_label_size
lowerCAmelCase_ :List[str] = initializer_range
lowerCAmelCase_ :int = num_labels
lowerCAmelCase_ :List[str] = num_choices
lowerCAmelCase_ :str = scope
lowerCAmelCase_ :Optional[Any] = self.vocab_size - 1
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ :List[str] = None
if self.use_input_mask:
lowerCAmelCase_ :Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ :Any = None
if self.use_token_type_ids:
lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ :Optional[int] = None
if self.use_mc_token_ids:
lowerCAmelCase_ :Optional[Any] = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
lowerCAmelCase_ :Union[str, Any] = None
lowerCAmelCase_ :List[Any] = None
lowerCAmelCase_ :List[Any] = None
if self.use_labels:
lowerCAmelCase_ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ :int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ :int = self.get_config()
lowerCAmelCase_ :Dict = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCAmelCase ( self ) -> Optional[Any]:
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , *__A ) -> Optional[int]:
lowerCAmelCase_ :List[Any] = CTRLModel(config=__A )
model.to(__A )
model.eval()
model(__A , token_type_ids=__A , head_mask=__A )
model(__A , token_type_ids=__A )
lowerCAmelCase_ :Any = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A , *__A ) -> List[Any]:
lowerCAmelCase_ :Optional[int] = CTRLLMHeadModel(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :Tuple = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            head_mask,
            token_type_ids,
            mc_token_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask}
return config, inputs_dict
def __lowerCAmelCase ( self , __A , __A , __A , __A , *__A ) -> Tuple:
lowerCAmelCase_ :int = self.num_labels
lowerCAmelCase_ :List[str] = CTRLForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCAmelCase_ :List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ :Union[str, Any] = model(__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class _SCREAMING_SNAKE_CASE ( A__ , A__ , A__ , unittest.TestCase ):
UpperCAmelCase_ :Optional[int] = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase_ :List[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase_ :Dict = (
{
"feature-extraction": CTRLModel,
"text-classification": CTRLForSequenceClassification,
"text-generation": CTRLLMHeadModel,
"zero-shot": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase_ :Tuple = True
UpperCAmelCase_ :List[str] = False
UpperCAmelCase_ :Union[str, Any] = False
def __lowerCAmelCase ( self , __A , __A , __A , __A , __A ) -> List[Any]:
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __lowerCAmelCase ( self ) -> Optional[int]:
lowerCAmelCase_ :int = CTRLModelTester(self )
lowerCAmelCase_ :List[str] = ConfigTester(self , config_class=__A , n_embd=37 )
def __lowerCAmelCase ( self ) -> Any:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Any:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Tuple:
lowerCAmelCase_ :List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*__A )
def __lowerCAmelCase ( self ) -> Optional[Any]:
lowerCAmelCase_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*__A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __lowerCAmelCase ( self ) -> Optional[Any]:
pass
@slow
def __lowerCAmelCase ( self ) -> str:
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ :Optional[Any] = CTRLModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def __lowerCAmelCase ( self ) -> str:
pass
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Union[str, Any]:
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __lowerCAmelCase ( self ) -> List[Any]:
lowerCAmelCase_ :Optional[Any] = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(__A )
lowerCAmelCase_ :str = torch.tensor(
[[1_1859, 0, 1611, 8]] , dtype=torch.long , device=__A ) # Legal the president is
lowerCAmelCase_ :Dict = [
1_1859,
0,
1611,
8,
5,
150,
2_6449,
2,
19,
348,
469,
3,
2595,
48,
2_0740,
24_6533,
24_6533,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
lowerCAmelCase_ :Dict = model.generate(__A , do_sample=__A )
self.assertListEqual(output_ids[0].tolist() , __A )
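# A standalone sketch of the same greedy-decoding call (hedged: CTRLTokenizer is
# not imported in this file; shown only to make the test's inputs concrete):
#     from transformers import CTRLTokenizer
#     tokenizer = CTRLTokenizer.from_pretrained("ctrl")
#     input_ids = tokenizer("Legal the president is", return_tensors="pt").input_ids
#     output_ids = model.generate(input_ids.to(torch_device), do_sample=False)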
| 256
| 1
|
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: List[str] , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: List[Any]=7 , UpperCamelCase_: Optional[Any]=3 , UpperCamelCase_: Tuple=18 , UpperCamelCase_: Any=30 , UpperCamelCase_: List[str]=400 , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Dict=None , UpperCamelCase_: List[str]=True , ):
UpperCamelCase_ =size if size is not None else {"""height""": 18, """width""": 18}
UpperCamelCase_ =parent
UpperCamelCase_ =batch_size
UpperCamelCase_ =num_channels
UpperCamelCase_ =image_size
UpperCamelCase_ =min_resolution
UpperCamelCase_ =max_resolution
UpperCamelCase_ =do_resize
UpperCamelCase_ =size
UpperCamelCase_ =do_normalize
def UpperCamelCase__ ( self: Any ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804],
[-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class __lowerCAmelCase ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : int = ImageGPTImageProcessor if is_vision_available() else None
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =ImageGPTImageProcessingTester(self )
@property
def UpperCamelCase__ ( self: Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase__ ( self: Dict ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCamelCase_ , "clusters" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "do_resize" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "size" ) )
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize" ) )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 18, "width": 18} )
UpperCamelCase_ =self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"height": 42, "width": 42} )
def UpperCamelCase__ ( self: List[Any] ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
UpperCamelCase_ =json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCamelCase_ =os.path.join(UpperCamelCase_ , "image_processor.json" )
image_processor_first.to_json_file(UpperCamelCase_ )
UpperCamelCase_ =self.image_processing_class.from_json_file(UpperCamelCase_ ).to_dict()
UpperCamelCase_ =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase_ )
def UpperCamelCase__ ( self: Optional[int] ):
UpperCamelCase_ =self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(UpperCamelCase_ )
UpperCamelCase_ =self.image_processing_class.from_pretrained(UpperCamelCase_ ).to_dict()
UpperCamelCase_ =image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(UpperCamelCase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , UpperCamelCase_ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def UpperCamelCase__ ( self: Union[str, Any] ):
pass
def prepare_images():
    # two test images from the fixtures dataset
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")
    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCamelCase__ ( self: Optional[Any] ):
UpperCamelCase_ =ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
UpperCamelCase_ =prepare_images()
# test non-batched
UpperCamelCase_ =image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1024) )
UpperCamelCase_ =[306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() , UpperCamelCase_ )
# test batched
UpperCamelCase_ =image_processing(UpperCamelCase_ , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1024) )
UpperCamelCase_ =[303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , UpperCamelCase_ )
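# Hedged sketch: the color-cluster ids produced above are what an ImageGPT model
# consumes (ImageGPTForCausalImageModeling is not imported here; illustrative only):
#     from transformers import ImageGPTForCausalImageModeling
#     model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
#     outputs = model(encoding.input_ids)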
| 391
|
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
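# Example invocation (hedged; the script name and paths are illustrative placeholders):
#     python convert_rembert_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path /path/to/rembert/model.ckpt \
#         --rembert_config_file /path/to/rembert_config.json \
#         --pytorch_dump_path /path/to/pytorch_model.bin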
| 694
| 0
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFRoFormerModel,
'''fill-mask''': TFRoFormerForMaskedLM,
'''question-answering''': TFRoFormerForQuestionAnswering,
'''text-classification''': TFRoFormerForSequenceClassification,
'''text-generation''': TFRoFormerForCausalLM,
'''token-classification''': TFRoFormerForTokenClassification,
'''zero-shot''': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModelTester(self )
lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
def A__ ( self : str ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : str ) -> str:
'''simple docstring'''
lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[str] =model(UpperCAmelCase )[0]
# TODO Replace vocab size
lowercase : Tuple =5_0000
lowercase : List[str] =[1, 6, vocab_size]
self.assertEqual(output.shape , UpperCAmelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
lowercase : Dict =tf.constant(
[
[
[-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
[-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
[-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : int ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =tf.constant([[4, 10]] )
lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
lowercase : Any =emba(input_ids.shape )
lowercase : List[str] =tf.constant(
[[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
def A__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =tf.constant(
[
[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
[0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
[0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
] )
lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
emba([2, 16, 512] )
lowercase : str =emba.weight[:3, :5]
tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
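# Hedged note on the expected values above: this sinusoidal embedding concatenates
# a sine half and a cosine half, weight[pos] = [sin(pos*w_0), ..., sin(pos*w_{d/2-1}),
# cos(pos*w_0), ..., cos(pos*w_{d/2-1})] with w_i = 1 / 10000**(2*i/d);
# e.g. sin(1) ~ 0.8415 and cos(1) ~ 0.5403 match the pos=1 row checked above.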
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = 1e-4
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
lowercase : Any =tf.constant(
[
[0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
[-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
[-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
[-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
[0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
[3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
] )
lowercase : int =tf.constant(
[
[0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
[0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
[1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
[2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
[-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
[-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
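# Hedged note: apply_rotary_position_embeddings rotates each channel pair by the
# position angle theta_i, i.e. q'_{2i} = q_{2i}*cos(theta_i) - q_{2i+1}*sin(theta_i)
# and q'_{2i+1} = q_{2i+1}*cos(theta_i) + q_{2i}*sin(theta_i). Because the key tensor
# is constructed as the negated query above, the two expected tensors are exact
# sign-flipped copies of each other.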
| 714
|
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary digits left, filling with zeros (logical left shift)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Drop the lowest `shift_amount` binary digits (logical right shift)."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Right shift that replicates the sign bit (arithmetic right shift)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
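# Example outputs (hedged sketch of expected behaviour):
#     logical_left_shift(1, 1)        -> '0b10'
#     logical_right_shift(8, 2)       -> '0b10'
#     arithmetic_right_shift(-8, 2)   -> '0b11110'   # sign bit replicated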
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8
| 0
|