Dataset schema:

| column | dtype | range |
|---|---|---|
| code | string | lengths 86 to 54.5k |
| code_codestyle | int64 | 0 to 371 |
| style_context | string | lengths 87 to 49.2k |
| style_context_codestyle | int64 | 0 to 349 |
| label | int64 | 0 to 1 |
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including num, via the sieve of Eratosthenes."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    # Everything above sqrt(num) that survived the sieve is prime
    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
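For a quick sanity check, the expected values below follow directly from the sieve's definition:

assert prime_sieve(10) == [2, 3, 5, 7]
assert prime_sieve(2) == [2]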
code_codestyle: 248
import math
import time

from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics


if is_torch_tpu_available(check_device=False):
    import torch_xla.core.xla_model as xm
    import torch_xla.debug.metrics as met


class QuestionAnsweringTrainer(Trainer):
    """A subclass of `Trainer` specific to question-answering tasks."""

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(self, eval_dataset=None, eval_examples=None, ignore_keys=None, metric_key_prefix="eval"):
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output.predictions)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node logs the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix="test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
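A minimal sketch of how this trainer is typically wired up; `postprocess_qa_predictions`, `squad_metric`, and the surrounding variables are assumptions, not part of the file above:

from transformers.trainer_utils import EvalPrediction

def post_processing_function(examples, features, predictions, stage="eval"):
    # Hypothetical helper: map start/end logits back to answer strings keyed by example id.
    answers = postprocess_qa_predictions(examples, features, predictions)
    formatted_predictions = [{"id": k, "prediction_text": v} for k, v in answers.items()]
    references = [{"id": ex["id"], "answers": ex["answers"]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)

trainer = QuestionAnsweringTrainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    eval_examples=eval_examples,
    post_process_function=post_processing_function,
    compute_metrics=lambda p: squad_metric.compute(predictions=p.predictions, references=p.label_ids),
)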
style_context_codestyle: 248, label: 1
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class __a ( snake_case__ ):
"""simple docstring"""
def __init__( self : int , lowercase_ : int , lowercase_ : Optional[int]=13 , lowercase_ : str=7 , lowercase_ : str=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=True , lowercase_ : Tuple=True , lowercase_ : List[Any]=False , lowercase_ : List[Any]=False , lowercase_ : str=False , lowercase_ : Dict=2 , lowercase_ : Any=99 , lowercase_ : Union[str, Any]=0 , lowercase_ : Dict=32 , lowercase_ : Optional[Any]=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Any=0.1 , lowercase_ : int=0.1 , lowercase_ : Union[str, Any]=512 , lowercase_ : Tuple=12 , lowercase_ : Any=2 , lowercase_ : Union[str, Any]=0.0_2 , lowercase_ : Tuple=3 , lowercase_ : Any=4 , lowercase_ : Union[str, Any]="last" , lowercase_ : List[Any]=None , lowercase_ : Tuple=None , ):
UpperCamelCase__ : int =parent
UpperCamelCase__ : List[str] =batch_size
UpperCamelCase__ : Tuple =seq_length
UpperCamelCase__ : Tuple =is_training
UpperCamelCase__ : str =use_input_lengths
UpperCamelCase__ : str =use_token_type_ids
UpperCamelCase__ : Optional[Any] =use_labels
UpperCamelCase__ : str =gelu_activation
UpperCamelCase__ : List[str] =sinusoidal_embeddings
UpperCamelCase__ : Optional[int] =causal
UpperCamelCase__ : Dict =asm
UpperCamelCase__ : Tuple =n_langs
UpperCamelCase__ : str =vocab_size
UpperCamelCase__ : Dict =n_special
UpperCamelCase__ : Optional[int] =hidden_size
UpperCamelCase__ : str =num_hidden_layers
UpperCamelCase__ : Optional[Any] =num_attention_heads
UpperCamelCase__ : List[str] =hidden_dropout_prob
UpperCamelCase__ : Dict =attention_probs_dropout_prob
UpperCamelCase__ : int =max_position_embeddings
UpperCamelCase__ : Optional[Any] =type_vocab_size
UpperCamelCase__ : List[str] =type_sequence_label_size
UpperCamelCase__ : Optional[Any] =initializer_range
UpperCamelCase__ : str =num_labels
UpperCamelCase__ : str =num_choices
UpperCamelCase__ : Optional[Any] =summary_type
UpperCamelCase__ : Optional[int] =use_proj
UpperCamelCase__ : Dict =scope
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ : Tuple =random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ : Tuple =None
if self.use_input_lengths:
UpperCamelCase__ : str =(
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase__ : List[str] =None
if self.use_token_type_ids:
UpperCamelCase__ : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase__ : List[Any] =None
UpperCamelCase__ : Optional[Any] =None
UpperCamelCase__ : Optional[int] =None
if self.use_labels:
UpperCamelCase__ : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase__ : int =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase__ : Optional[Any] =ids_tensor([self.batch_size] , 2 ).float()
UpperCamelCase__ : List[str] =ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase__ : Optional[int] =self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _lowerCAmelCase ( self : List[Any] ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _lowerCAmelCase ( self : List[str] , lowercase_ : Dict , lowercase_ : int , lowercase_ : Dict , lowercase_ : str , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : List[str] , lowercase_ : List[Any] , ):
UpperCamelCase__ : int =FlaubertModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Tuple =model(lowercase_ , lengths=lowercase_ , langs=lowercase_ )
UpperCamelCase__ : Optional[Any] =model(lowercase_ , langs=lowercase_ )
UpperCamelCase__ : Optional[int] =model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : List[str] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , ):
UpperCamelCase__ : Any =FlaubertWithLMHeadModel(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : int =model(lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : int , lowercase_ : Optional[Any] , lowercase_ : int , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : str , ):
UpperCamelCase__ : Optional[int] =FlaubertForQuestionAnsweringSimple(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Any =model(lowercase_ )
UpperCamelCase__ : Optional[int] =model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : List[Any] , ):
UpperCamelCase__ : List[str] =FlaubertForQuestionAnswering(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Any =model(lowercase_ )
UpperCamelCase__ : Union[str, Any] =model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , p_mask=lowercase_ , )
UpperCamelCase__ : Optional[int] =model(
lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , cls_index=lowercase_ , is_impossible=lowercase_ , )
((UpperCamelCase__) , ) : int =result_with_labels.to_tuple()
UpperCamelCase__ : str =model(lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ )
((UpperCamelCase__) , ) : Dict =result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _lowerCAmelCase ( self : str , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : Any , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : str , lowercase_ : List[str] , ):
UpperCamelCase__ : int =FlaubertForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Optional[Any] =model(lowercase_ )
UpperCamelCase__ : Union[str, Any] =model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : List[str] , lowercase_ : Any , ):
UpperCamelCase__ : Any =self.num_labels
UpperCamelCase__ : Union[str, Any] =FlaubertForTokenClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Any =model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : str , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : Dict , ):
UpperCamelCase__ : Optional[Any] =self.num_choices
UpperCamelCase__ : str =FlaubertForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : Optional[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ : Optional[Any] =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ : Optional[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase__ : Optional[Any] =model(
lowercase_ , attention_mask=lowercase_ , token_type_ids=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : int ):
UpperCamelCase__ : Optional[Any] =self.prepare_config_and_inputs()
(
(
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) , (
UpperCamelCase__
) ,
) : Optional[int] =config_and_inputs
UpperCamelCase__ : List[Any] ={
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''lengths''': input_lengths,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class __a ( snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _lowerCAmelCase ( self : Tuple , lowercase_ : str , lowercase_ : int , lowercase_ : str , lowercase_ : int , lowercase_ : int ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _lowerCAmelCase ( self : Dict , lowercase_ : str , lowercase_ : Optional[Any] , lowercase_ : List[Any]=False ):
UpperCamelCase__ : List[str] =super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
UpperCamelCase__ : Any =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
UpperCamelCase__ : Union[str, Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowercase_ )
return inputs_dict
def _lowerCAmelCase ( self : Dict ):
UpperCamelCase__ : List[str] =FlaubertModelTester(self )
UpperCamelCase__ : str =ConfigTester(self , config_class=lowercase_ , emb_dim=37 )
def _lowerCAmelCase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowercase_ )
def _lowerCAmelCase ( self : Any ):
UpperCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowercase_ )
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowercase_ )
def _lowerCAmelCase ( self : Dict ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowercase_ )
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowercase_ )
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowercase_ )
@slow
def _lowerCAmelCase ( self : Optional[Any] ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Optional[int] =FlaubertModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
@slow
@require_torch_gpu
def _lowerCAmelCase ( self : Tuple ):
UpperCamelCase__ , UpperCamelCase__ : Optional[int] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
UpperCamelCase__ : Union[str, Any] =True
UpperCamelCase__ : Any =model_class(config=lowercase_ )
UpperCamelCase__ : int =self._prepare_for_class(lowercase_ , lowercase_ )
UpperCamelCase__ : int =torch.jit.trace(
lowercase_ , (inputs_dict['''input_ids'''].to('''cpu''' ), inputs_dict['''attention_mask'''].to('''cpu''' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowercase_ , os.path.join(lowercase_ , '''traced_model.pt''' ) )
UpperCamelCase__ : List[Any] =torch.jit.load(os.path.join(lowercase_ , '''traced_model.pt''' ) , map_location=lowercase_ )
loaded(inputs_dict['''input_ids'''].to(lowercase_ ) , inputs_dict['''attention_mask'''].to(lowercase_ ) )
@require_torch
class __a ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : str ):
UpperCamelCase__ : int =FlaubertModel.from_pretrained('''flaubert/flaubert_base_cased''' )
UpperCamelCase__ : Union[str, Any] =torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
UpperCamelCase__ : Union[str, Any] =model(lowercase_ )[0]
UpperCamelCase__ : Tuple =torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowercase_ )
UpperCamelCase__ : Optional[int] =torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1e-4 ) )
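These tests are normally collected by pytest, but the case can also be driven directly with unittest; a sketch, assuming a working torch install:

import unittest

suite = unittest.defaultTestLoader.loadTestsFromTestCase(FlaubertModelTest)
unittest.TextTestRunner(verbosity=2).run(suite)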
code_codestyle: 157
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
_SCREAMING_SNAKE_CASE : str = False
class __a ( unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_torch_gpu
class __a ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Optional[int] ):
UpperCamelCase__ : Any =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : int =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase__ : Dict =torch.manual_seed(0 )
UpperCamelCase__ : Optional[int] =pipe.dual_guided(
prompt='''first prompt''' , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(lowercase_ )
UpperCamelCase__ : str =VersatileDiffusionPipeline.from_pretrained(lowercase_ , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : int =generator.manual_seed(0 )
UpperCamelCase__ : str =pipe.dual_guided(
prompt='''first prompt''' , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Dict =VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCamelCase__ : str ='''cyberpunk 2077'''
UpperCamelCase__ : str =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' )
UpperCamelCase__ : int =torch.manual_seed(0 )
UpperCamelCase__ : int =pipe.dual_guided(
prompt=lowercase_ , image=lowercase_ , text_to_image_strength=0.7_5 , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' , ).images
UpperCamelCase__ : List[str] =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Dict =np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : Dict ='''A painting of a squirrel eating a burger '''
UpperCamelCase__ : Optional[int] =torch.manual_seed(0 )
UpperCamelCase__ : str =pipe.text_to_image(
prompt=lowercase_ , generator=lowercase_ , guidance_scale=7.5 , num_inference_steps=50 , output_type='''numpy''' ).images
UpperCamelCase__ : str =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : List[Any] =np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
UpperCamelCase__ : Optional[Any] =pipe.image_variation(lowercase_ , generator=lowercase_ , output_type='''numpy''' ).images
UpperCamelCase__ : str =image[0, 253:256, 253:256, -1]
assert image.shape == (1, 512, 512, 3)
UpperCamelCase__ : Tuple =np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
style_context_codestyle: 157, label: 1
def sum_of_digits(n: int) -> int:
    """
    Find the sum of digits of a number.

    >>> sum_of_digits(12345)
    15
    >>> sum_of_digits(-123)
    6
    """
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """
    Find the sum of digits of a number using recursion.

    >>> sum_of_digits_recursion(12345)
    15
    """
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """
    Find the sum of digits of a number by converting it to a string.

    >>> sum_of_digits_compact(12345)
    15
    """
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Benchmark the three implementations against increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (262144, 1125899906842624, 1267650600228229401496703205376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
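A quick agreement check across the three implementations; the expected values follow directly from the definitions:

assert sum_of_digits(12345) == 15
for n in (0, 7, 12345, -987):
    assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)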
code_codestyle: 321
def valid_coloring(neighbours: list[int], colored_vertices: list[int], color: int) -> bool:
    """Return True if no already-colored neighbour of the vertex uses `color`."""
    # Does any neighbour not satisfy the constraints
    return not any(
        neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(neighbours)
    )


def util_color(graph: list[list[int]], max_colors: int, colored_vertices: list[int], index: int) -> bool:
    """Recursively try to color vertex `index` onwards, backtracking on failure."""
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list[list[int]], max_colors: int) -> list[int]:
    """Return a valid coloring (one color index per vertex), or [] if none exists."""
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
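A small usage sketch; the graph below is illustrative and the expected output follows from the backtracking order:

test_graph = [
    [0, 1, 0, 0, 0],
    [1, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
    [0, 0, 1, 0, 1],
    [0, 1, 0, 1, 0],
]
print(color(test_graph, 3))  # [0, 1, 0, 1, 0]; an empty list would mean no 3-coloring exists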
style_context_codestyle: 321, label: 1
def bfs(graph, source, sink, parent):
    # Return True if the sink can be reached in the residual graph;
    # `parent` records the path found by the search.
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink):
    # This array is filled by BFS and used to store the augmenting path
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow

        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
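For reference, this adjacency matrix is the classic textbook flow network (as in CLRS); the script should print a maximum flow of 23.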
code_codestyle: 368
"""Convert pytorch checkpoints to TensorFlow."""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPT2Config,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
T5Config,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPT2LMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFT5ForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWav2Vec2Model,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
Wav2Vec2Config,
Wav2Vec2Model,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tf2_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
    import numpy as np
    import torch

    from . import (
        AlbertForPreTraining,
        BartForConditionalGeneration,
        BertForPreTraining,
        BertForQuestionAnswering,
        BertForSequenceClassification,
        CamembertForMaskedLM,
        CTRLLMHeadModel,
        DistilBertForMaskedLM,
        DistilBertForQuestionAnswering,
        DPRContextEncoder,
        DPRQuestionEncoder,
        DPRReader,
        ElectraForPreTraining,
        FlaubertWithLMHeadModel,
        GPT2LMHeadModel,
        LayoutLMForMaskedLM,
        LxmertForPreTraining,
        LxmertVisualFeatureEncoder,
        OpenAIGPTLMHeadModel,
        RobertaForMaskedLM,
        RobertaForSequenceClassification,
        T5ForConditionalGeneration,
        TransfoXLLMHeadModel,
        XLMRobertaForMaskedLM,
        XLMWithLMHeadModel,
        XLNetLMHeadModel,
    )
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPT2Config,
TFGPT2LMHeadModel,
GPT2LMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
T5Config,
TFT5ForConditionalGeneration,
T5ForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
Wav2Vec2Config,
TFWav2Vec2Model,
Wav2Vec2Model,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.")

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f"Building TensorFlow model from configuration: {config}")
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f"Max absolute difference between models outputs {diff}")
        assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"

    # Save pytorch-model
    print(f"Save TensorFlow model to {tf_dump_path}")
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f" Converting model type {j}/{len(model_types)}: {model_type}")
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.")

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f"    Skipping finetuned checkpoint {model_shortcut_name}")
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f"    Skipping not finetuned checkpoint {model_shortcut_name}")
                continue
            print(
                f"    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}"
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
    )
    parser.add_argument(
        "--model_type",
        default=None,
        type=str,
        help=(
            f"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
            "convert all the models from AWS."
        ),
    )
    parser.add_argument(
        "--pytorch_checkpoint_path",
        default=None,
        type=str,
        help=(
            "Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
            "If not given, will download and convert all the checkpoints from AWS."
        ),
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        help=(
            "The config json file corresponding to the pre-trained model. \n"
            "This specifies the model architecture. If not given and "
            "--pytorch_checkpoint_path is not given or is a shortcut name "
            "use the configuration associated to the shortcut name on the AWS"
        ),
    )
    parser.add_argument(
        "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
    )
    parser.add_argument(
        "--use_cached_models",
        action="store_true",
        help="Use cached models if possible instead of updating to latest checkpoint versions.",
    )
    parser.add_argument(
        "--remove_cached_files",
        action="store_true",
        help="Remove pytorch models after conversion (save memory when converting in batches).",
    )
    parser.add_argument(
        "--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models."
    )
    args = parser.parse_args()

    # if args.pytorch_checkpoint_path is not None:
    #     convert_pt_checkpoint_to_tf(args.model_type.lower(),
    #                                 args.pytorch_checkpoint_path,
    #                                 args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
    #                                 args.tf_dump_path,
    #                                 compare_with_pt_model=args.compare_with_pt_model,
    #                                 use_cached_models=args.use_cached_models)
    # else:
    convert_all_pt_checkpoints_to_tf(
        args.model_type.lower() if args.model_type is not None else None,
        args.tf_dump_path,
        model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
        if args.pytorch_checkpoint_path is not None
        else None,
        config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
        compare_with_pt_model=args.compare_with_pt_model,
        use_cached_models=args.use_cached_models,
        remove_cached_files=args.remove_cached_files,
        only_convert_finetuned_models=args.only_convert_finetuned_models,
    )
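The converter can also be driven directly from Python; a sketch with a placeholder shortcut name (resolution through the AWS maps depends on the `MODEL_CLASSES` entries above):

convert_pt_checkpoint_to_tf(
    model_type="bert",
    pytorch_checkpoint_path="bert-base-uncased",
    config_file="bert-base-uncased",
    tf_dump_path="./bert-base-uncased-tf_model.h5",
    compare_with_pt_model=True,
)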
style_context_codestyle: 236, label: 0
import argparse

from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow T5 checkpoint to a PyTorch model."""
    # Initialise PyTorch model
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
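Besides the CLI, the function can be called directly; the paths below are placeholders:

convert_tf_checkpoint_to_pytorch(
    tf_checkpoint_path="/path/to/t5/model.ckpt",
    config_file="/path/to/t5/config.json",
    pytorch_dump_path="/path/to/pytorch_dump",
)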
code_codestyle: 230
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig"],
    "tokenization_ctrl": ["CTRLTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ctrl"] = [
        "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CTRLForSequenceClassification",
        "CTRLLMHeadModel",
        "CTRLModel",
        "CTRLPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_ctrl"] = [
        "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCTRLForSequenceClassification",
        "TFCTRLLMHeadModel",
        "TFCTRLModel",
        "TFCTRLPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
    from .tokenization_ctrl import CTRLTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ctrl import (
            CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            CTRLForSequenceClassification,
            CTRLLMHeadModel,
            CTRLModel,
            CTRLPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_ctrl import (
            TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFCTRLForSequenceClassification,
            TFCTRLLMHeadModel,
            TFCTRLModel,
            TFCTRLPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
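For illustration, the practical effect of the `_LazyModule` indirection (a behavioral sketch, not part of this file): importing the package is cheap, and the heavy torch/TF modules are only materialized on first attribute access.

import transformers.models.ctrl as ctrl  # cheap: only the import structure is registered
model_cls = ctrl.CTRLModel  # the torch-backed module is loaded here, on first access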
style_context_codestyle: 230, label: 1
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """An adapter to assist with logging in multiprocess environments."""

    @staticmethod
    def _should_log(main_process_only):
        # Check whether this process should emit the log record
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` "
                "before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
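Typical usage of the adapter returned by `get_logger`, mirroring the accelerate documentation:

from accelerate.logging import get_logger

logger = get_logger(__name__)
logger.info("Logged once, on the main process only")  # main_process_only defaults to True
logger.info("Logged on every process", main_process_only=False)
logger.info("Logged by each process in rank order", main_process_only=False, in_order=True)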
code_codestyle: 370
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List

import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor

from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger()


@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # Transfer the weights of `self.src` to `self.dest` by tracking the
        # operations performed during a forward pass with `x` as input.
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f"Numbers of operations are different. Source module has {len(src_traced)} operations while"
                f" destination module has {len(dest_traced)}."
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f"Transfered from={src_m} to={dest_m}")


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")
    with torch.no_grad():
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add model",
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message="Add image processor",
            use_temp_dir=True,
        )

        print(f"Pushed {checkpoint_name}")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        "resnet18": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet26": ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet34": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type="basic"
        ),
        "resnet50": ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet101": ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
        "resnet152": ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1024, 2048], layer_type="bottleneck"
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)

    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help=(
            "The name of the model you wish to convert, it must be one of the supported resnet* architecture,"
            " currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        type=Path,
        required=True,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        default=True,
        type=bool,
        required=False,
        help="If True, push model and image processor to the hub.",
    )

    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
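A direct-call sketch that converts and verifies every supported checkpoint without pushing to the Hub (the directory name is a placeholder):

from pathlib import Path

convert_weights_and_push(Path("./resnet_dumps"), model_name=None, push_to_hub=False)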
style_context_codestyle: 171, label: 0
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _a ( ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
lowerCAmelCase__ = Image.open(requests.get(UpperCamelCase_ , stream=UpperCamelCase_ ).raw ).convert("RGB" )
return image
def _a ( UpperCamelCase_ : int ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = []
# fmt: off
# vision encoder
rename_keys.append(("visual_encoder.cls_token", "vision_model.embeddings.class_embedding") )
rename_keys.append(("visual_encoder.pos_embed", "vision_model.embeddings.position_embedding") )
rename_keys.append(("visual_encoder.patch_embed.proj.weight", "vision_model.embeddings.patch_embedding.weight") )
rename_keys.append(("visual_encoder.patch_embed.proj.bias", "vision_model.embeddings.patch_embedding.bias") )
rename_keys.append(("ln_vision.weight", "vision_model.post_layernorm.weight") )
rename_keys.append(("ln_vision.bias", "vision_model.post_layernorm.bias") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.weight", F"vision_model.encoder.layers.{i}.layer_norm1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm1.bias", F"vision_model.encoder.layers.{i}.layer_norm1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.weight", F"vision_model.encoder.layers.{i}.layer_norm2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.norm2.bias", F"vision_model.encoder.layers.{i}.layer_norm2.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.qkv.weight", F"vision_model.encoder.layers.{i}.self_attn.qkv.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.weight", F"vision_model.encoder.layers.{i}.self_attn.projection.weight",) )
rename_keys.append((F"visual_encoder.blocks.{i}.attn.proj.bias", F"vision_model.encoder.layers.{i}.self_attn.projection.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.weight", F"vision_model.encoder.layers.{i}.mlp.fc1.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc1.bias", F"vision_model.encoder.layers.{i}.mlp.fc1.bias") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.weight", F"vision_model.encoder.layers.{i}.mlp.fc2.weight") )
rename_keys.append((F"visual_encoder.blocks.{i}.mlp.fc2.bias", F"vision_model.encoder.layers.{i}.mlp.fc2.bias") )
# QFormer
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.weight", "qformer.layernorm.weight") )
rename_keys.append(("Qformer.bert.embeddings.LayerNorm.bias", "qformer.layernorm.bias") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"vision_model.encoder.layers.{i}.self_attn.qkv.bias"] = qkv_bias
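# Hedged illustration (an addition, not part of the original conversion script): the LAVIS
# checkpoint has no bias on the k projection, so the fused qkv bias is laid out as
# [q_bias, zeros_like(q_bias), v_bias]. The standalone sketch below shows that shape arithmetic.
def _demo_qkv_bias_shape(hidden_size: int = 4) -> torch.Size:
    q_bias = torch.ones(hidden_size)
    v_bias = torch.full((hidden_size,), 2.0)
    qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))
    assert qkv_bias.shape == (3 * hidden_size,)
    return qkv_bias.shape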
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
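# Hedged usage sketch (added): the config can be assembled without downloading any BLIP-2
# weights; the eos_token_id below is a placeholder for the value derived from the tokenizer
# in convert_blip2_checkpoint.
#
#     config, image_size = get_blip2_config("blip2-opt-2.7b", eos_token_id=2)
#     # -> Blip2Config wrapping an OPT text config, image_size == 224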
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
"blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
"blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
"blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
"blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
"blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
"blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
"blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
}
    name, type = model_name_to_original[model_name]
    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device
    )
original_model.eval()
print("Done!" )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
# some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val
# read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)
# create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
assert original_logits.shape == logits.shape
print("First values of original logits:" , original_logits[0, :3, :3] )
print("First values of HF logits:" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
lowerCAmelCase__ = torch.tensor(
[[-41.5_850, -4.4_440, -8.9_922], [-47.4_322, -5.9_143, -1.7_340]] , device=UpperCamelCase_ )
assert torch.allclose(logits[0, :3, :3] , UpperCamelCase_ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
lowerCAmelCase__ = torch.tensor(
[[-57.0_109, -9.8_967, -12.6_280], [-68.6_578, -12.7_191, -10.5_065]] , device=UpperCamelCase_ )
else:
# cast to same type
lowerCAmelCase__ = logits.dtype
assert torch.allclose(original_logits.to(UpperCamelCase_ ) , UpperCamelCase_ , atol=1e-2 )
print("Looks ok!" )
print("Generating a caption..." )
lowerCAmelCase__ = ""
lowerCAmelCase__ = tokenizer(UpperCamelCase_ , return_tensors="pt" ).input_ids.to(UpperCamelCase_ )
lowerCAmelCase__ = original_model.generate({"image": original_pixel_values} )
lowerCAmelCase__ = hf_model.generate(
UpperCamelCase_ , UpperCamelCase_ , do_sample=UpperCamelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("Original generation:" , UpperCamelCase_ )
lowerCAmelCase__ = input_ids.shape[1]
lowerCAmelCase__ = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=UpperCamelCase_ )
lowerCAmelCase__ = [text.strip() for text in output_text]
print("HF generation:" , UpperCamelCase_ )
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 340
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
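# Hedged sanity check (added): the QFT maps |0...0> to an equal superposition, so the
# measured counts should be roughly uniform over all 2**n basis states, e.g.
# quantum_fourier_transform(2) -> counts close to {'00': 2500, '01': 2500, '10': 2500, '11': 2500}.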
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"
)
| 340
| 1
|
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
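# Worked example (added): for the default n = 1000 the only Pythagorean triplet is
# (a, b, c) = (200, 375, 425), since 200 + 375 + 425 == 1000 and
# 200**2 + 375**2 == 425**2, so solution() == 200 * 375 * 425 == 31875000.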
if __name__ == "__main__":
print(f"{solution() = }")
| 368
|
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n
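# Note (added): this is Project Euler problem 115; with the default minimum block
# length of 50 the fill-count function first exceeds one million at n = 168, so
# solution() is expected to return 168.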
if __name__ == "__main__":
print(f"{solution() = }")
| 68
| 0
|
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 157
|
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Blip2Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
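# Hedged usage sketch (added; the checkpoint name is an assumption):
#
#     from PIL import Image
#     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
#     inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#     # inputs carries pixel_values plus input_ids / attention_mask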
| 157
| 1
|
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2

LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1  # (0 is vertical, 1 is horizontal)
def main() -> None:
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    print("Processing...")
    new_images, new_annos, paths = update_image_and_anno(img_paths, annos, FLIP_TYPE)
    for index, image in enumerate(new_images):
        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = paths[index].split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Success {index + 1}/{len(new_images)} with {file_name}")
        annos_list = []
        for anno in new_annos[index]:
            obj = f"{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")
        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            boxes.append(
                [
                    int(obj[0]),
                    float(obj[1]),
                    float(obj[2]),
                    float(obj[3]),
                    float(obj[4]),
                ]
            )
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(img_list: list, anno_list: list, flip_type: int = 1) -> tuple[list, list, list]:
    new_annos_lists = []
    path_list = []
    new_imgs_list = []
    for idx in range(len(img_list)):
        new_annos = []
        path = img_list[idx]
        path_list.append(path)
        img_annos = anno_list[idx]
        img = cv2.imread(path)
        if flip_type == 1:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                x_center_new = 1 - bbox[1]
                new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]])
        elif flip_type == 0:
            new_img = cv2.flip(img, flip_type)
            for bbox in img_annos:
                y_center_new = 1 - bbox[2]
                new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]])
        new_annos_lists.append(new_annos)
        new_imgs_list.append(new_img)
    return new_imgs_list, new_annos_lists, path_list
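# Hedged arithmetic note (added): YOLO annotations store normalized box centers, so a
# horizontal flip only mirrors the x coordinate: a box with x_center 0.25 maps to
# 1 - 0.25 == 0.75, while class id, width and height stay unchanged (and analogously
# for the y coordinate on vertical flips).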
def random_chars(number_char: int = 32) -> str:
    assert number_char > 1, "The number of character should greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))
if __name__ == "__main__":
main()
print('DONE ✅')
| 302
|
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, f1_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} )
    task_type: Optional[str] = field(
        default="NER", metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} ,)
@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} )
    labels: Optional[str] = field(
        default=None, metadata={"help": "Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."} ,)
    max_seq_length: int = field(
        default=128, metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } ,)
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
    module = import_module("tasks")
    try:
        token_classification_task_clazz = getattr(module, model_args.task_type)
        token_classification_task: TokenClassificationTask = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f"Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
            f"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
# Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
    labels = token_classification_task.get_labels(data_args.labels)
    label_map: Dict[int, str] = dict(enumerate(labels))
    num_labels = len(labels)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, )
    model = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, )
# Get datasets
    train_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, )
        if training_args.do_eval
        else None
    )
    def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]:
        preds = np.argmax(predictions, axis=2)
        batch_size, seq_len = preds.shape
        out_label_list = [[] for _ in range(batch_size)]
        preds_list = [[] for _ in range(batch_size)]
        for i in range(batch_size):
            for j in range(seq_len):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]])
                    preds_list[i].append(label_map[preds[i][j]])
        return preds_list, out_label_list

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds_list, out_label_list = align_predictions(p.predictions, p.label_ids)
        return {
            "accuracy_score": accuracy_score(out_label_list, preds_list),
            "precision": precision_score(out_label_list, preds_list),
            "recall": recall_score(out_label_list, preds_list),
            "f1": f1_score(out_label_list, preds_list),
        }
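    # Hedged worked example (added): with label_map = {0: "O", 1: "B-PER"} and a label_ids
    # row [0, 1, -100], align_predictions skips the -100 (ignore_index) position and yields
    # two-token lists such as (["O", "B-PER"], ["O", "B-PER"]) for preds and gold labels.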
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
    # Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, data_collator=data_collator, )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_process_zero():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
# Predict
    if training_args.do_predict:
        test_dataset = TokenClassificationDataset(
            token_classification_task=token_classification_task, data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.test, )
        predictions, label_ids, metrics = trainer.predict(test_dataset)
        preds_list, _ = align_predictions(predictions, label_ids)
        output_test_results_file = os.path.join(training_args.output_dir, "test_results.txt")
        if trainer.is_world_process_zero():
            with open(output_test_results_file, "w") as writer:
                for key, value in metrics.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
        # Save predictions
        output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, "w") as writer:
                with open(os.path.join(data_args.data_dir, "test.txt"), "r") as f:
                    token_classification_task.write_predictions_to_file(writer, f, preds_list)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 302
| 1
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 346
|
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase="train" ):
return calculate_hypothesis_value(lowerCamelCase, lowerCamelCase ) - output(
lowerCamelCase, lowerCamelCase )
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :str = 0
for i in range(len(lowerCamelCase ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None
def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value
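# For reference (added; standard least-squares form): the quantity approximated above is
# dJ/dtheta_j = (1/m) * sum_i (h(x_i) - y_i) * x_i[j], where index -1 plays the role of
# the constant bias input whose feature value is fixed at 1.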
def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000_002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit, ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 236
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase_ = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
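# Hedged shape note (added): timm stores the fused projection as qkv.weight with shape
# (3 * hidden_size, hidden_size); rows [0:h], [h:2h] and [2h:3h] hold the query, key and
# value weights respectively, which is exactly what the slicing above unpacks.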
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak model's weights to our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6
    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
original_model.eval()
# load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)
# load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)
    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)
    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
    args = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 368
|
import argparse
from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    tax_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    split_mlp_wi = "wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
snake_case_ = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
snake_case_ = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = tax_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = tax_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = tax_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = tax_attention_value
        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = tax_attention_layer_norm
        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"]["weight"] = tax_global_layer_norm
        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = tax_mlp_layer_norm
        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block
    # Only for layer 0:
    tax_encoder_rel_embedding = tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"]["embedding"] = tax_encoder_rel_embedding
    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        tax_encoder_global_rel_embedding = tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"]["embedding"] = tax_encoder_global_rel_embedding
    # Assigning
    tax_encoder_norm = tax_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
        layer_name = f"layers_{str(layer_index)}"
# Self-Attention
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
snake_case_ = tax_enc_dec_attention_module["""key"""]["""kernel"""]
snake_case_ = tax_enc_dec_attention_module["""out"""]["""kernel"""]
snake_case_ = tax_enc_dec_attention_module["""query"""]["""kernel"""]
snake_case_ = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
snake_case_ = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
        if split_mlp_wi:
            tax_mlp_wi_0 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            tax_mlp_wi_1 = tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            tax_mlp_wi = tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
        tax_mlp_wo = tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
        # Layer Normalization
        tax_mlp_layer_norm = tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = tax_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = tax_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = tax_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = tax_attention_value
        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = tax_pre_attention_layer_norm
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = tax_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = tax_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = tax_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = tax_enc_dec_attention_value
        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = tax_cross_layer_norm
        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = tax_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = tax_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = tax_mlp_wi
        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = tax_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = tax_mlp_layer_norm
        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block
    # Decoder Normalization
    tax_decoder_norm = tax_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = tax_decoder_norm
    # Only for layer 0:
    tax_decoder_rel_embedding = tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"]["embedding"] = tax_decoder_rel_embedding
    # Token Embeddings
    tax_token_embeddings = tax_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
snake_case_ = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(lowercase_ )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
        '''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument('''--config_name''', default=None, type=str, required=True, help='''Config name of LongT5/T5 model.''')
parser.add_argument(
'''--flax_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output FLAX model.'''
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_flax(args.t5x_checkpoint_path, args.config_name, args.flax_dump_folder_path)
| 34
| 0
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])
    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"
    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy() )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )
    def test_base_backward_compatibility(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/0_12_0.yaml",
                "--command",
                self.command,
                "--tpu_zone",
                self.tpu_zone,
                "--tpu_name",
                self.tpu_name,
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )
    def test_with_config_file(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"], return_stdout=True )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all", output, )
    def test_with_config_file_and_command(self):
        output = run_command(
            self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all", output, )
    def test_with_config_file_and_multiple_command(self):
        output = run_command(
            self.cmd
            + [
                "--config_file",
                "tests/test_configs/latest.yaml",
                "--command",
                self.command,
                "--command",
                "echo \"Hello World\"",
                "--debug",
            ], return_stdout=True, )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all", output, )
def lowercase_ (self : Optional[Any] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase__ = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
def lowercase_ (self : List[Any] ) -> str:
"""simple docstring"""
UpperCAmelCase__ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
def lowercase_ (self : str ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase__ = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
def lowercase_ (self : Union[str, Any] ) -> Any:
"""simple docstring"""
UpperCAmelCase__ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=_lowerCamelCase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowerCamelCase , )
| 65
|
"""simple docstring"""
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/accelerate")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda c: c.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state="closed")
        elif (
            days_since_updated > 23
            and days_since_creation >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Add stale comment
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
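# Usage sketch (assumed, not taken from this file): the script expects a
# GITHUB_TOKEN with repo scope in the environment and is typically run on a
# schedule, e.g.
#
#   GITHUB_TOKEN=<token> python utils/stale.py
#
# The path `utils/stale.py` is illustrative; in CI this would be a cron-triggered
# job that exports `secrets.GITHUB_TOKEN` before invoking the script.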
| 171
| 0
|
"""simple docstring"""
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def rreplace(s, old, new, occurrence):
    # Replace the last `occurrence` occurrences of `old` in `s` with `new`.
    li = s.rsplit(old, occurrence)
    return new.join(li)


def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict):
    upgrade = {}

    group_keys = ["group_1", "group_2", "group_3", "group_4"]
    for key, value in state_dict.items():
        for group_key in group_keys:
            if group_key in key:
                key = key.replace(f"{group_key}.", f"{group_key}.group.")

        if "res_path" in key:
            key = key.replace("res_path.", "res_path.path.")

        if key.endswith(".w"):
            key = rreplace(key, ".w", ".weight", 1)
        if key.endswith(".b"):
            key = rreplace(key, ".b", ".bias", 1)

        upgrade[key] = value.float()

    return upgrade


@torch.no_grad()
def convert_dalle_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, save_checkpoint=True):
    from dall_e import Encoder

    encoder = Encoder()
    if os.path.exists(checkpoint_path):
        ckpt = torch.load(checkpoint_path)
    else:
        ckpt = torch.hub.load_state_dict_from_url(checkpoint_path)

    if isinstance(ckpt, Encoder):
        ckpt = ckpt.state_dict()
    encoder.load_state_dict(ckpt)

    if config_path is not None:
        config = FlavaImageCodebookConfig.from_pretrained(config_path)
    else:
        config = FlavaImageCodebookConfig()

    hf_model = FlavaImageCodebook(config).eval()
    state_dict = encoder.state_dict()

    hf_state_dict = upgrade_state_dict(state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    if save_checkpoint:
        hf_model.save_pretrained(pytorch_dump_folder_path)
    else:
        return hf_state_dict


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
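# Example invocation (script name and paths are illustrative, not taken from this file):
#
#   python convert_dalle_to_flava_codebook.py \
#       --checkpoint_path https://cdn.openai.com/dall-e/encoder.pkl \
#       --pytorch_dump_folder_path ./flava-image-codebook
#
# `checkpoint_path` may be a local file or a URL: torch.hub downloads the weights
# when the path does not exist locally (see convert_dalle_checkpoint above).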
| 368
|
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}


@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )
        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
| 144
| 0
|
'''simple docstring'''
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    # Pick the smallest integer dtype that can hold every token id.
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
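# Example invocation (a sketch; the script name is illustrative):
#
#   python binarized_data.py --file_path data/dump.txt \
#       --tokenizer_type bert --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text
#
# Each line of the input file becomes one pickled sequence of token ids, stored
# as uint16 when the vocabulary fits in 16 bits and int32 otherwise (see above).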
| 83
|
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)


def prepare_blenderbot_small_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }


@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)


@require_tokenizers
@require_tf
class TFBlenderbot90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_90_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert generated_words in (
            "i don't know. i just feel like i'm going to throw up. it's not fun.",
            "i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
            "i'm not sure. i just feel like i've been in a bad situation.",
        )
| 68
| 0
|
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
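# Worked trace for the example above, following the four rules in the loop:
#   (4 * 2)   -> pops '*', 2, 4  -> pushes 8
#   (2 + 3)   -> pops '+', 3, 2  -> pushes 5
#   (8 * 5)   -> pops '*', 5, 8  -> pushes 40
#   (5 + 40)  -> pops '+', 40, 5 -> pushes 45
# leaving 45 on the operand stack, which RULE 5 returns.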
| 261
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
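# A minimal sketch of what the lazy structure above buys (import lines are
# illustrative): importing the package only registers names; the heavy torch or
# TF modules are loaded the first time one of their attributes is accessed.
#
#   from transformers import GroupViTConfig   # cheap: configuration only
#   from transformers import GroupViTModel    # first access triggers the torch import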
| 261
| 1
|
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids


class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""
            --nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 302
|
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class IPNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (IPNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {"num_train_timesteps": 1000}
        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            if time_step is None:
                time_step = scheduler.timesteps[len(scheduler.timesteps) // 2]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps, time_step=None)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=None)

    def test_full_loop(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 2540529) < 10
| 302
| 1
|
'''simple docstring'''
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__lowercase: List[str] = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)


class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image,
            output_size=(size["height"], size["width"]),
            keep_aspect_ratio=keep_aspect_ratio,
            multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits

        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )

            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()

            semantic_segmentation = []

            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]

        return semantic_segmentation
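# A minimal usage sketch (the checkpoint id is illustrative, not taken from this
# file): preprocess an image, run a DPT model, then map the logits back to the
# input resolution with post_process_semantic_segmentation.
#
#   processor = DPTImageProcessor.from_pretrained("Intel/dpt-large-ade")
#   inputs = processor(images=image, return_tensors="pt")
#   outputs = model(**inputs)
#   segmentation = processor.post_process_semantic_segmentation(
#       outputs, target_sizes=[image.size[::-1]]
#   )[0]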
| 31
|
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
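# A minimal usage sketch (the checkpoint id is illustrative): the pipeline
# denoises random latents with the UNet and decodes them with the VQ-VAE.
#
#   pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#   image = pipe(batch_size=1, num_inference_steps=50).images[0]
#   image.save("ldm_sample.png")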
| 31
| 1
|
"""simple docstring"""
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative when `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCamelCase : List[str] = int(input('''Expected value: '''))
__UpperCamelCase : Optional[Any] = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations))
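# Worked example of one update, assuming expected=80 and an initial weight of 20:
#   layer_1       = sigmoid(0.02 * 20) = sigmoid(0.4) ~ 0.5987
#   layer_1_error = 80 / 100 - 0.5987 ~ 0.2013
#   layer_1_delta = 0.2013 * 0.5987 * (1 - 0.5987) ~ 0.0484
#   weight       += 0.02 * 0.0484
# Repeated propagations drive layer_1 * 100 toward the expected value.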
| 106
|
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)

require_version("pytorch_lightning>=1.0.4")

MODEL_MODES = {
    "base": AutoModel,
    "sequence-classification": AutoModelForSequenceClassification,
    "question-answering": AutoModelForQuestionAnswering,
    "pretraining": AutoModelForPreTraining,
    "token-classification": AutoModelForTokenClassification,
    "language-modeling": AutoModelWithLMHead,
    "summarization": AutoModelForSeq2SeqLM,
    "translation": AutoModelForSeq2SeqLM,
}


# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
    "linear": get_linear_schedule_with_warmup,
    "cosine": get_cosine_schedule_with_warmup,
    "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
    "polynomial": get_polynomial_decay_schedule_with_warmup,
    # '': get_constant_schedule,              # not supported for now
    # '': get_constant_schedule_with_warmup,  # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config

        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))

        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model

    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler

    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check this named paramters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer

        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]

    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs

    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)

    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)

    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument(
            "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name"
        )
        parser.add_argument(
            "--tokenizer_name",
            default=None,
            type=str,
            help="Pretrained tokenizer name or path if not the same as model_name",
        )
        parser.add_argument(
            "--cache_dir",
            default=str(Path(root_dir).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument(
            "--encoder_layerdrop",
            type=float,
            help="Encoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--decoder_layerdrop",
            type=float,
            help="Decoder layer dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--dropout",
            type=float,
            help="Dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument(
            "--attention_dropout",
            type=float,
            help="Attention dropout probability (Optional). Goes into model.config",
        )
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        '''simple docstring'''
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback(pl.Callback):
    # check whether the model params still receive gradients after each batch
    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        '''simple docstring'''
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        '''simple docstring'''
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        '''simple docstring'''
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir):
    # To allow all pl args uncomment the following line
    # parser = pl.Trainer.add_argparse_args(parser)
    parser.add_argument(
        "--output_dir", default=str(Path(root_dir).parent / "test_run" / "model_checkpoints"), type=str, help="The output directory where the model predictions and checkpoints will be written.", )
    parser.add_argument(
        "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", )
    parser.add_argument(
        "--fp16_opt_level", type=str, default="O2", help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ), )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir", default=str(Path(root_dir).parent / "test_run" / "dummy-train-data"), type=str, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", )
def generic_train(
    model: BaseTransformer,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)

    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}
    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )

    if args.do_train:
        trainer.fit(model)

    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        '''simple docstring'''

        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
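    # Export note (sketch): the tf.function input signature on `serving` is what
    # allows the tokenizer+model pair to be exported as a SavedModel accepting raw
    # string tensors; the saved-model test below round-trips exactly this method.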
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    '''simple docstring'''

    def setUp(self):
        super().setUp()
        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs

            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
__a: Optional[Any] = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
'''simple docstring'''
def __init__( self , **__lowerCAmelCase ) -> int:
super().__init__(**__lowerCAmelCase )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
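# Usage sketch (hypothetical file name and illustrative scores):
# from transformers import pipeline
# classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
# classifier("cat.png", candidate_labels=["cat", "dog"])
# -> [{"score": 0.99, "label": "cat"}, {"score": 0.01, "label": "dog"}]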
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    '''simple docstring'''

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    '''simple docstring'''

    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
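# Usage sketch: this packaged builder is what backs `load_dataset("pandas", ...)`
# for pickled DataFrames, e.g. (hypothetical file name):
# ds = datasets.load_dataset("pandas", data_files="frames.pkl", split="train")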
"""simple docstring"""
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
A__ : Optional[Any] = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)." )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"""Loading Tokenizer ({args.tokenizer_name})""")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"""Loading text from {args.file_path}""")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"""{len(data)} examples to process.""")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"""{bos} {text.strip()} {sep}"""
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"""{iter} examples processed. - {(end-start):.2f}s/{interval}expl""")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"""{len(data)} examples processed.""")

    dp_file = f"""{args.dump_file}.{args.tokenizer_name}.pickle"""
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"""Dump to {dp_file}""")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
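    # Sizing note (sketch): token ids are stored as uint16 whenever the vocabulary
    # fits in 16 bits (vocab_size < 2**16), roughly halving the pickled dump
    # relative to int32.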
if __name__ == "__main__":
main()
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    '''simple docstring'''
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums_a: list[float], nums_b: list[float]) -> float:
    '''simple docstring'''
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
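# Worked example (illustrative): merging [1.0, 2.0] with [3.0] sorts to
# [1.0, 2.0, 3.0]; the length is odd (div=1, mod=1), so the median is
# all_numbers[1] == 2.0.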
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
"""simple docstring"""
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text
    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。")
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])
    def test_pickle_mecab_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="mecab")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
    def test_mecab_tokenizer(self):
        tokenizer = MecabTokenizer(mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_unidic_lite(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic_lite")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_unidic(self):
        try:
            tokenizer = MecabTokenizer(mecab_dic="unidic")
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_lower(self):
        tokenizer = MecabTokenizer(do_lower_case=True, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
    def test_mecab_tokenizer_with_option(self):
        try:
            tokenizer = MecabTokenizer(
                do_lower_case=False, normalize_text=False, mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
    def test_mecab_tokenizer_no_normalize(self):
        tokenizer = MecabTokenizer(normalize_text=False, mecab_dic="ipadic")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
    def test_pickle_sudachi_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="sudachi")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_sudachi
    def test_sudachi_tokenizer_core(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_A(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="A")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_B(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="B")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
    def test_sudachi_tokenizer_split_mode_C(self):
        tokenizer = SudachiTokenizer(sudachi_dict_type="core", sudachi_split_mode="C")
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
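    # Note: Sudachi's split modes trade granularity -- "A" yields the shortest
    # units, "C" the longest (named-entity-like) units, with "B" in between, as
    # the three 外国人参政権 assertions above show.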
@require_sudachi
    def test_sudachi_tokenizer_lower(self):
        tokenizer = SudachiTokenizer(do_lower_case=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_no_normalize(self):
        tokenizer = SudachiTokenizer(normalize_text=False, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
    def test_sudachi_tokenizer_trim_whitespace(self):
        tokenizer = SudachiTokenizer(trim_whitespace=True, sudachi_dict_type="core")
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
    def test_pickle_jumanpp_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, word_tokenizer_type="jumanpp")
        self.assertIsNotNone(tokenizer)

        text = "こんにちは、世界。\nこんばんは、世界。"
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [3, 12, 10, 14, 4, 9, 12, 10, 14])

        filename = os.path.join(self.tmpdirname, "tokenizer.bin")
        with open(filename, "wb") as handle:
            pickle.dump(tokenizer, handle)
        with open(filename, "rb") as handle:
            tokenizer_new = pickle.load(handle)
        tokens_loaded = tokenizer_new.tokenize(text)
        self.assertListEqual(tokens, tokens_loaded)
@require_jumanpp
    def test_jumanpp_tokenizer(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_lower(self):
        tokenizer = JumanppTokenizer(do_lower_case=True)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_no_normalize(self):
        tokenizer = JumanppTokenizer(normalize_text=False)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_trim_whitespace(self):
        tokenizer = JumanppTokenizer(trim_whitespace=True)
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
    def test_jumanpp_tokenizer_ext(self):
        tokenizer = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
    def test_sentencepiece_tokenizer(self):
        tokenizer = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
        subword_tokenizer = tokenizer.subword_tokenizer

        tokens = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。")
        self.assertListEqual(tokens, ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"])

        tokens = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは")
        self.assertListEqual(tokens, ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"])
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
def a__ ( self ):
pass # TODO add if relevant
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, subword_tokenizer_type="character")
        tokens = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。")
        self.assertListEqual(
            tokens, ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12])
    def test_character_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = CharacterTokenizer(vocab=vocab, unk_token="[UNK]")
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char")

        text = tokenizer.encode("ありがとう。", add_special_tokens=False)
        text_2 = tokenizer.encode("どういたしまして。", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # 2 is for "[CLS]", 3 is for "[SEP]"
        assert encoded_sentence == [2] + text + [3]
        assert encoded_pair == [2] + text + [3] + text_2 + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
    def test_tokenizer_bert_japanese(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        tokenizer = AutoTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
        self.assertIsInstance(tokenizer, BertJapaneseTokenizer)
class BertTokenizerMismatchTest(unittest.TestCase):
    def test_tokenizer_mismatch_warning(self):
        EXAMPLE_BERT_JAPANESE_ID = "cl-tohoku/bert-base-japanese"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertTokenizer.from_pretrained(EXAMPLE_BERT_JAPANESE_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
        EXAMPLE_BERT_ID = "bert-base-cased"
        with self.assertLogs("transformers", level="WARNING") as cm:
            BertJapaneseTokenizer.from_pretrained(EXAMPLE_BERT_ID)
            self.assertTrue(
                cm.records[0].message.startswith(
                    "The tokenizer class you load from this checkpoint is not the same type as the class this function"
                    " is called from."))
"""simple docstring"""
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
import unittest
import numpy as np
def schur_complement(mat_a, mat_b, mat_c, pseudo_inv=None):
    """simple docstring"""
    shape_a = np.shape(mat_a)
    shape_b = np.shape(mat_b)
    shape_c = np.shape(mat_c)

    if shape_a[0] != shape_b[0]:
        msg = (
            "Expected the same number of rows for A and B. "
            f"Instead found A of size {shape_a} and B of size {shape_b}"
        )
        raise ValueError(msg)

    if shape_b[1] != shape_c[1]:
        msg = (
            "Expected the same number of columns for B and C. "
            f"Instead found B of size {shape_b} and C of size {shape_c}"
        )
        raise ValueError(msg)

    a_inv = pseudo_inv
    if a_inv is None:
        try:
            a_inv = np.linalg.inv(mat_a)
        except np.linalg.LinAlgError:
            raise ValueError(
                "Input matrix A is not invertible. Cannot compute Schur complement.")

    return mat_c - mat_b.T @ a_inv @ mat_b
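# Background (sketch): for the block matrix M = [[A, B], [B.T, C]] with A
# invertible, the Schur complement S = C - B.T @ A^{-1} @ B satisfies
# det(M) = det(A) * det(S) -- the identity the first unit test below checks
# numerically.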
class _a ( unittest.TestCase ):
    def test_schur_complement(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        s = schur_complement(a, b, c)
        input_matrix = np.block([[a, b], [b.T, c]])
        det_x = np.linalg.det(input_matrix)
        det_a = np.linalg.det(a)
        det_s = np.linalg.det(s)
        self.assertAlmostEqual(det_x, det_a * det_s)

    def test_improper_a_b_dimensions(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1], [6, 3]])
        with self.assertRaises(ValueError):
            schur_complement(b, a, c)

    def test_improper_b_c_dimensions(self) -> None:
        """simple docstring"""
        a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]])
        b = np.array([[0, 3], [3, 0], [2, 3]])
        c = np.array([[2, 1, 3], [6, 3, 5]])
        with self.assertRaises(ValueError):
            schur_complement(a, b, c)
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    """simple docstring"""
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        result_moves = (
            left_distrib_moves + right_distrib_moves + abs(coins_to_left) + abs(coins_to_right)
        )
        result_excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(result_moves, result_excess)

    return get_distrib(root)[0]
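# Sanity check (illustrative): a root holding 3 coins with two empty leaf
# children needs 2 moves (one coin pushed down each edge). This is the classic
# "distribute coins in a binary tree" problem, where each edge is crossed
# |excess| times for the subtree below it.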
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
__SCREAMING_SNAKE_CASE : str = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
"""constant""": get_constant_schedule,
"""constant_w_warmup""": get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer(Trainer):
'''simple docstring'''
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding..")

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")

    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self):
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits

    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss

    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ):
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"], attention_mask=inputs["attention_mask"], **gen_kwargs, )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)

    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}")
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device)
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
__SCREAMING_SNAKE_CASE : Dict = get_logger(__name__)
class ExtractManager:
    '''simple docstring'''

    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    '''simple docstring'''

    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    '''simple docstring'''

    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
@classmethod
def _A ( cls : str , A : Union[Path, str] , **A : List[Any] ):
return tarfile.is_tarfile(A )
@staticmethod
def _A ( A : Union[str, Any] , A : str ):
def resolved(A : str ) -> str:
return os.path.realpath(os.path.abspath(A ) )
def badpath(A : str , A : str ) -> bool:
# joinpath will ignore base if path is absolute
return not resolved(os.path.join(A , A ) ).startswith(A )
def badlink(A : str , A : str ) -> bool:
# Links are interpreted relative to the directory containing the link
_UpperCAmelCase : List[str] = resolved(os.path.join(A , os.path.dirname(info.name ) ) )
return badpath(info.linkname , base=A )
_UpperCAmelCase : Optional[int] = resolved(A )
for finfo in members:
if badpath(finfo.name , A ):
logger.error(F"""Extraction of {finfo.name} is blocked (illegal path)""" )
elif finfo.issym() and badlink(A , A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}""" )
elif finfo.islnk() and badlink(A , A ):
logger.error(F"""Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}""" )
else:
yield finfo
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : int = tarfile.open(A )
tar_file.extractall(A , members=TarExtractor.safemembers(A , A ) )
tar_file.close()
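# Path-safety sketch (assumption: a standalone restatement of the
# `safemembers` check above, with names of my own): a tar member is rejected
# when its resolved target escapes the extraction directory, which blocks
# `../` traversal entries.
def _escapes_extraction_dir(base: str, member_name: str) -> bool:
    target = os.path.realpath(os.path.join(base, member_name))
    return not target.startswith(os.path.realpath(base))

# _escapes_extraction_dir("/tmp/out", "../../etc/passwd") -> True  (blocked)
# _escapes_extraction_dir("/tmp/out", "data/train.txt")   -> False (allowed)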
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Union[str, Any] = [b"\x1F\x8B"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with gzip.open(A , "rb" ) as gzip_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = [
b"PK\x03\x04",
b"PK\x05\x06", # empty archive
b"PK\x07\x08", # spanned archive
]
@classmethod
def _A ( cls : Dict , A : Union[Path, str] , A : bytes = b"" ):
if super().is_extractable(A , magic_number=A ):
return True
try:
# Alternative version of zipfile.is_zipfile that has fewer false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(A , "rb" ) as fp:
_UpperCAmelCase : Tuple = _EndRecData(A )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
_UpperCAmelCase : Dict = fp.read(A ) # CD is where we expect it to be
if len(A ) == sizeCentralDir:
_UpperCAmelCase : Any = struct.unpack(A , A ) # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
os.makedirs(A , exist_ok=A )
with zipfile.ZipFile(A , "r" ) as zip_file:
zip_file.extractall(A )
zip_file.close()
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Dict = [b"\xFD\x37\x7A\x58\x5A\x00"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with lzma.open(A ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[str] = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.RARFILE_AVAILABLE:
raise ImportError("Please pip install rarfile" )
import rarfile
os.makedirs(A , exist_ok=A )
_UpperCAmelCase : List[str] = rarfile.RarFile(A )
rf.extractall(A )
rf.close()
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = [b"\x28\xb5\x2F\xFD"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.ZSTANDARD_AVAILABLE:
raise ImportError("Please pip install zstandard" )
import zstandard as zstd
_UpperCAmelCase : Optional[Any] = zstd.ZstdDecompressor()
with open(A , "rb" ) as ifh, open(A , "wb" ) as ofh:
dctx.copy_stream(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[Any] = [b"\x42\x5A\x68"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
with bza.open(A , "rb" ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: List[Any] = [b"\x37\x7A\xBC\xAF\x27\x1C"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.PY7ZR_AVAILABLE:
raise ImportError("Please pip install py7zr" )
import pyazr
os.makedirs(A , exist_ok=A )
with pyazr.SevenZipFile(A , "r" ) as archive:
archive.extractall(A )
class lowerCamelCase_ (snake_case__ ):
'''simple docstring'''
__UpperCamelCase: Optional[int] = [b"\x04\x22\x4D\x18"]
@staticmethod
def _A ( A : Union[Path, str] , A : Union[Path, str] ):
if not config.LZ4_AVAILABLE:
raise ImportError("Please pip install lz4" )
import lza.frame
with lza.frame.open(A , "rb" ) as compressed_file:
with open(A , "wb" ) as extracted_file:
shutil.copyfileobj(A , A )
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
@classmethod
def _A ( cls : List[Any] ):
return max(
len(A )
for extractor in cls.extractors.values()
if issubclass(A , A )
for extractor_magic_number in extractor.magic_numbers )
@staticmethod
def _A ( A : Union[Path, str] , A : int ):
try:
return MagicNumberBaseExtractor.read_magic_number(A , magic_number_length=A )
except OSError:
return b""
@classmethod
def _A ( cls : Optional[Any] , A : Union[Path, str] , A : bool = False ):
warnings.warn(
"Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'infer_extractor_format' instead." , category=A , )
_UpperCAmelCase : Union[str, Any] = cls.infer_extractor_format(A )
if extractor_format:
return True if not return_extractor else (True, cls.extractors[extractor_format])
return False if not return_extractor else (False, None)
@classmethod
def _A ( cls : Dict , A : Union[Path, str] ): # <Added version="2.4.0"/>
_UpperCAmelCase : Optional[int] = cls._get_magic_number_max_length()
_UpperCAmelCase : str = cls._read_magic_number(A , A )
for extractor_format, extractor in cls.extractors.items():
if extractor.is_extractable(A , magic_number=A ):
return extractor_format
@classmethod
def _A ( cls : List[str] , A : Union[Path, str] , A : Union[Path, str] , A : Optional[str] = None , A : Optional[BaseExtractor] = "deprecated" , ):
os.makedirs(os.path.dirname(A ) , exist_ok=A )
# Prevent parallel extractions
_UpperCAmelCase : Tuple = str(Path(A ).with_suffix(".lock" ) )
with FileLock(A ):
shutil.rmtree(A , ignore_errors=A )
if extractor_format or extractor != "deprecated":
if extractor != "deprecated" or not isinstance(A , A ): # passed as positional arg
warnings.warn(
"Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
"Use 'extractor_format' instead." , category=A , )
_UpperCAmelCase : Tuple = extractor if extractor != "deprecated" else extractor_format
else:
_UpperCAmelCase : Tuple = cls.extractors[extractor_format]
return extractor.extract(A , A )
else:
warnings.warn(
"Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
"exception in 3.0.0." , category=A , )
for extractor in cls.extractors.values():
if extractor.is_extractable(A ):
return extractor.extract(A , A )
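# Format-sniffing sketch (assumption: a standalone re-implementation of the
# idea behind `infer_extractor_format`, with an abbreviated magic-number
# table): read the longest known magic number once, then compare prefixes.
def _sniff_format(path):
    magics = {b"\x1f\x8b": "gzip", b"PK\x03\x04": "zip", b"\x42\x5a\x68": "bz2"}
    with open(path, "rb") as f:
        head = f.read(max(len(m) for m in magics))
    for magic, fmt in magics.items():
        if head.startswith(magic):
            return fmt
    return None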
| 31
| 1
|
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = [0] * len(_A )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = [1] * len(_A )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(_A ) ):
if indegree[i] == 0:
queue.append(_A )
while queue:
SCREAMING_SNAKE_CASE__ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
SCREAMING_SNAKE_CASE__ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(_A )
print(max(_A ) )
# Adjacency list of Graph
_SCREAMING_SNAKE_CASE : List[str] = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
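# Minimal sketch (assumption: a clean standalone restatement of the algorithm
# above, with names of my own): the longest path in a DAG via Kahn's
# topological order; every vertex starts at distance 1 and relaxes its
# successors as it leaves the queue.
def longest_path_length(dag):
    indegree = {v: 0 for v in dag}
    for targets in dag.values():
        for t in targets:
            indegree[t] += 1
    queue = [v for v, d in indegree.items() if d == 0]
    dist = {v: 1 for v in dag}
    while queue:
        v = queue.pop(0)
        for t in dag[v]:
            dist[t] = max(dist[t], dist[v] + 1)
            indegree[t] -= 1
            if indegree[t] == 0:
                queue.append(t)
    return max(dist.values())

# For the adjacency list above this returns 5, e.g. the chain 0 -> 3 -> 5 -> 6 -> 7.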
| 371
|
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
_SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
_SCREAMING_SNAKE_CASE : str = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def UpperCAmelCase_ ( _A ):
'''simple docstring'''
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE__ = model_type_to_module_name(_A )
SCREAMING_SNAKE_CASE__ = importlib.import_module(F'''.{module_name}''' , '''transformers.models''' )
try:
return getattr(_A , _A )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_A , '''__name__''' , _A ) == class_name:
return extractor
# We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE__ = importlib.import_module('''transformers''' )
if hasattr(_A , _A ):
return getattr(_A , _A )
return None
def UpperCAmelCase_ ( _A , _A = None , _A = False , _A = False , _A = None , _A = None , _A = None , _A = False , **_A , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = get_file_from_repo(
_A , _A , cache_dir=_A , force_download=_A , resume_download=_A , proxies=_A , use_auth_token=_A , revision=_A , local_files_only=_A , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
with open(_A , encoding='''utf-8''' ) as reader:
return json.load(_A )
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] ) -> int:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__lowerCamelCase )
def lowercase_ ( cls : Optional[int] , __lowerCamelCase : Any , **__lowerCamelCase : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ = kwargs.pop('''config''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''trust_remote_code''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__,SCREAMING_SNAKE_CASE__ = ImageProcessingMixin.get_image_processor_dict(__lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = config_dict.get('''image_processor_type''' , __lowerCamelCase )
SCREAMING_SNAKE_CASE__ = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
SCREAMING_SNAKE_CASE__ = config_dict.pop('''feature_extractor_type''' , __lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
SCREAMING_SNAKE_CASE__ = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE__ = config_dict['''auto_map''']['''AutoFeatureExtractor''']
SCREAMING_SNAKE_CASE__ = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE__ = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# It could be in `config.image_processor_type`
SCREAMING_SNAKE_CASE__ = getattr(__lowerCamelCase , '''image_processor_type''' , __lowerCamelCase )
if hasattr(__lowerCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
SCREAMING_SNAKE_CASE__ = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
SCREAMING_SNAKE_CASE__ = image_processor_class_from_name(__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = image_processor_auto_map is not None
SCREAMING_SNAKE_CASE__ = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
SCREAMING_SNAKE_CASE__ = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE__ = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = kwargs.pop('''code_revision''' , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
SCREAMING_SNAKE_CASE__ = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )]
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
raise ValueError(
f'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
f'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
f'''`model_type` keys in its {CONFIG_NAME}: {", ".join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowercase_ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] ) -> str:
IMAGE_PROCESSOR_MAPPING.register(__lowerCamelCase , __lowerCamelCase )
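# Usage sketch (assumption: the model id and the resolved class are
# illustrative, and downloading the config requires network access):
# `AutoImageProcessor` picks the concrete class from the mapping above via
# the config's `image_processor_type` / model type.
from transformers import AutoImageProcessor

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
print(type(image_processor).__name__)  # e.g. "ViTImageProcessor"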
| 218
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def __UpperCAmelCase ( self : Any ) -> Dict:
a = "| <pad> <unk> <s> </s> a b c d e f g h i j k".split()
a = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
a = {
"unk_token": "<unk>",
"bos_token": "<s>",
"eos_token": "</s>",
}
a = {
"feature_size": 1,
"padding_value": 0.0,
"sampling_rate": 1_60_00,
"return_attention_mask": False,
"do_normalize": True,
}
a = tempfile.mkdtemp()
a = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
a = os.path.join(self.tmpdirname , __lowerCamelCase )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
with open(self.feature_extraction_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__lowerCamelCase ) + "\n" )
# load decoder from hub
a = "hf-internal-testing/ngram-beam-search-decoder"
def __UpperCAmelCase ( self : str , **__lowerCamelCase : str ) -> int:
a = self.add_kwargs_tokens_map.copy()
kwargs.update(__lowerCamelCase )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : str , **__lowerCamelCase : Dict ) -> int:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **__lowerCamelCase )
def __UpperCAmelCase ( self : Union[str, Any] , **__lowerCamelCase : Optional[int] ) -> Union[str, Any]:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **__lowerCamelCase )
def __UpperCAmelCase ( self : Any ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def __UpperCAmelCase ( self : Union[str, Any] ) -> List[str]:
a = self.get_tokenizer()
a = self.get_feature_extractor()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
a = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , __lowerCamelCase )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , __lowerCamelCase )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> Any:
a = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
a = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def __UpperCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
a = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(["xx"] )
with self.assertRaisesRegex(__lowerCamelCase , "include" ):
WavaVecaProcessorWithLM(
tokenizer=__lowerCamelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def __UpperCAmelCase ( self : Optional[Any] ) -> List[str]:
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
a = floats_list((3, 10_00) )
a = feature_extractor(__lowerCamelCase , return_tensors="np" )
a = processor(__lowerCamelCase , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCAmelCase ( self : Tuple ) -> List[Any]:
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
a = "This is a test string"
a = processor(text=__lowerCamelCase )
a = tokenizer(__lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCAmelCase ( self : int , __lowerCamelCase : List[str]=(2, 10, 16) , __lowerCamelCase : Any=77 ) -> List[Any]:
np.random.seed(__lowerCamelCase )
return np.random.rand(*__lowerCamelCase )
def __UpperCAmelCase ( self : Dict ) -> int:
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
a = self._get_dummy_logits(shape=(10, 16) , seed=13 )
a = processor.decode(__lowerCamelCase )
a = decoder.decode_beams(__lowerCamelCase )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual("</s> <s> </s>" , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ["fork"], ["spawn"]] )
def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Any ) -> str:
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
a = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
a = processor.batch_decode(__lowerCamelCase )
else:
with get_context(__lowerCamelCase ).Pool() as pool:
a = processor.batch_decode(__lowerCamelCase , __lowerCamelCase )
a = list(__lowerCamelCase )
with get_context("fork" ).Pool() as p:
a = decoder.decode_beams_batch(__lowerCamelCase , __lowerCamelCase )
a , a , a = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(__lowerCamelCase , decoded_processor.text )
self.assertListEqual(["<s> <s> </s>", "<s> <s> <s>"] , decoded_processor.text )
self.assertListEqual(__lowerCamelCase , decoded_processor.logit_score )
self.assertListEqual(__lowerCamelCase , decoded_processor.lm_score )
def __UpperCAmelCase ( self : Dict ) -> Any:
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
a = self._get_dummy_logits()
a = 15
a = -20.0
a = -4.0
a = processor.batch_decode(
__lowerCamelCase , beam_width=__lowerCamelCase , beam_prune_logp=__lowerCamelCase , token_min_logp=__lowerCamelCase , )
a = decoded_processor_out.text
a = list(__lowerCamelCase )
with get_context("fork" ).Pool() as pool:
a = decoder.decode_beams_batch(
__lowerCamelCase , __lowerCamelCase , beam_width=__lowerCamelCase , beam_prune_logp=__lowerCamelCase , token_min_logp=__lowerCamelCase , )
a = [d[0][0] for d in decoded_decoder_out]
a = [d[0][2] for d in decoded_decoder_out]
a = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(["</s> <s> <s>", "<s> <s> <s>"] , __lowerCamelCase )
self.assertTrue(np.array_equal(__lowerCamelCase , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-20.054, -18.447] , __lowerCamelCase , atol=1e-3 ) )
self.assertTrue(np.array_equal(__lowerCamelCase , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-15.554, -13.9_474] , __lowerCamelCase , atol=1e-3 ) )
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[Any]:
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
a = self._get_dummy_logits()
a = 2.0
a = 5.0
a = -20.0
a = True
a = processor.batch_decode(
__lowerCamelCase , alpha=__lowerCamelCase , beta=__lowerCamelCase , unk_score_offset=__lowerCamelCase , lm_score_boundary=__lowerCamelCase , )
a = decoded_processor_out.text
a = list(__lowerCamelCase )
decoder.reset_params(
alpha=__lowerCamelCase , beta=__lowerCamelCase , unk_score_offset=__lowerCamelCase , lm_score_boundary=__lowerCamelCase , )
with get_context("fork" ).Pool() as pool:
a = decoder.decode_beams_batch(
__lowerCamelCase , __lowerCamelCase , )
a = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
self.assertListEqual(["<s> </s> <s> </s> </s>", "</s> </s> <s> </s> </s>"] , __lowerCamelCase )
a = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -20.0 )
self.assertEqual(lm_model.score_boundary , __lowerCamelCase )
def __UpperCAmelCase ( self : Tuple ) -> str:
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = processor.decoder.model_container[processor.decoder._model_key]
a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
a = os.listdir(__lowerCamelCase )
a = ["alphabet.json", "language_model"]
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : str ) -> List[str]:
a = snapshot_download("hf-internal-testing/processor_with_lm" )
a = WavaVecaProcessorWithLM.from_pretrained(__lowerCamelCase )
a = processor.decoder.model_container[processor.decoder._model_key]
a = Path(language_model._kenlm_model.path.decode("utf-8" ) ).parent.parent.absolute()
a = os.listdir(__lowerCamelCase )
a = os.listdir(__lowerCamelCase )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that the decoder from the hub and the local files in cache are the same
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def __UpperCAmelCase ( self : int ) -> Dict:
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = AutoProcessor.from_pretrained("hf-internal-testing/processor_with_lm" )
a = floats_list((3, 10_00) )
a = processor_wavaveca(__lowerCamelCase , return_tensors="np" )
a = processor_auto(__lowerCamelCase , return_tensors="np" )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
a = self._get_dummy_logits()
a = processor_wavaveca.batch_decode(__lowerCamelCase )
a = processor_auto.batch_decode(__lowerCamelCase )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]:
a = self.get_feature_extractor()
a = self.get_tokenizer()
a = self.get_decoder()
a = WavaVecaProcessorWithLM(tokenizer=__lowerCamelCase , feature_extractor=__lowerCamelCase , decoder=__lowerCamelCase )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg="`processor` and `feature_extractor` model input names do not match" , )
@staticmethod
def __UpperCAmelCase ( __lowerCamelCase : Any , __lowerCamelCase : int ) -> int:
a = [d[key] for d in offsets]
return retrieved_list
def __UpperCAmelCase ( self : List[Any] ) -> int:
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = self._get_dummy_logits()[0]
a = processor.decode(__lowerCamelCase , output_word_offsets=__lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(" ".join(self.get_from_offsets(outputs["word_offsets"] , "word" ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"] , "end_offset" ) , [1, 3, 5] )
def __UpperCAmelCase ( self : List[Any] ) -> Any:
a = WavaVecaProcessorWithLM.from_pretrained("hf-internal-testing/processor_with_lm" )
a = self._get_dummy_logits()
a = processor.batch_decode(__lowerCamelCase , output_word_offsets=__lowerCamelCase )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue("text" in outputs )
self.assertTrue("word_offsets" in outputs )
self.assertTrue(isinstance(__lowerCamelCase , __lowerCamelCase ) )
self.assertListEqual(
[" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) for o in outputs["word_offsets"]] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "word" ) , ["<s>", "<s>", "</s>"] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "start_offset" ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs["word_offsets"][0] , "end_offset" ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def __UpperCAmelCase ( self : List[Any] ) -> List[Any]:
import torch
a = load_dataset("common_voice" , "en" , split="train" , streaming=__lowerCamelCase )
a = ds.cast_column("audio" , datasets.Audio(sampling_rate=1_60_00 ) )
a = iter(__lowerCamelCase )
a = next(__lowerCamelCase )
a = AutoProcessor.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
a = WavaVecaForCTC.from_pretrained("patrickvonplaten/wav2vec2-base-100h-with-lm" )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
a = processor(sample["audio"]["array"] , return_tensors="pt" ).input_values
with torch.no_grad():
a = model(__lowerCamelCase ).logits.cpu().numpy()
a = processor.decode(logits[0] , output_word_offsets=__lowerCamelCase )
a = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
a = [
{
"start_time": d["start_offset"] * time_offset,
"end_time": d["end_offset"] * time_offset,
"word": d["word"],
}
for d in output["word_offsets"]
]
a = "WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL"
# output words
self.assertEqual(" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) , __lowerCamelCase )
self.assertEqual(" ".join(self.get_from_offsets(__lowerCamelCase , "word" ) ) , output.text )
# output times
a = torch.tensor(self.get_from_offsets(__lowerCamelCase , "start_time" ) )
a = torch.tensor(self.get_from_offsets(__lowerCamelCase , "end_time" ) )
# fmt: off
a = torch.tensor([1.4_199, 1.6_599, 2.2_599, 3.0, 3.24, 3.5_999, 3.7_999, 4.0_999, 4.26, 4.94, 5.28, 5.6_599, 5.78, 5.94, 6.32, 6.5_399, 6.6_599] )
a = torch.tensor([1.5_399, 1.8_999, 2.9, 3.16, 3.5_399, 3.72, 4.0_199, 4.1_799, 4.76, 5.1_599, 5.5_599, 5.6_999, 5.86, 6.1_999, 6.38, 6.6_199, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=0.01 ) )
self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=0.01 ) )
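# Timestamp sketch (assumption: illustrative offsets; the 320/16_000 ratio is
# typical for wav2vec2-style models, mirroring the slow test above): word
# offsets counted in logit frames convert to seconds via
# time_offset = inputs_to_logits_ratio / sampling_rate.
_time_offset = 320 / 16_000  # 0.02 s per logit frame
_word = {"word": "hello", "start_offset": 12, "end_offset": 21}
print(_word["word"], _word["start_offset"] * _time_offset, _word["end_offset"] * _time_offset)
# hello ~0.24 ~0.42 (seconds)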
| 107
|
import math
def snake_case__ ( SCREAMING_SNAKE_CASE_ : int ):
'''simple docstring'''
lowercase__ : Optional[Any] = []
lowercase__ : str = 2
lowercase__ : Optional[Any] = int(math.sqrt(SCREAMING_SNAKE_CASE_ ) ) # Size of every segment
lowercase__ : Dict = [True] * (end + 1)
lowercase__ : Union[str, Any] = []
while start <= end:
if temp[start] is True:
in_prime.append(SCREAMING_SNAKE_CASE_ )
for i in range(start * start , end + 1 , SCREAMING_SNAKE_CASE_ ):
lowercase__ : int = False
start += 1
prime += in_prime
lowercase__ : Optional[int] = end + 1
lowercase__ : List[str] = min(2 * end , SCREAMING_SNAKE_CASE_ )
while low <= n:
lowercase__ : str = [True] * (high - low + 1)
for each in in_prime:
lowercase__ : str = math.floor(low / each ) * each
if t < low:
t += each
for j in range(SCREAMING_SNAKE_CASE_ , high + 1 , SCREAMING_SNAKE_CASE_ ):
lowercase__ : Optional[Any] = False
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
if temp[j] is True:
prime.append(j + low )
lowercase__ : Optional[Any] = high + 1
lowercase__ : Optional[int] = min(high + end , SCREAMING_SNAKE_CASE_ )
return prime
print(sieve(10**6))
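# Sanity-check sketch (assumption: a standalone restatement of the idea above):
# a segmented sieve only keeps a sqrt(n)-sized window in memory at a time, so
# its output must agree with plain trial division.
def _is_prime(k):
    return k > 1 and all(k % d for d in range(2, int(k**0.5) + 1))

# expected: sieve(100) == [k for k in range(2, 101) if _is_prime(k)]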
| 214
| 0
|
from collections import Counter
from timeit import timeit
def __lowerCamelCase ( lowerCamelCase__ = "" , ):
"""simple docstring"""
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def __lowerCamelCase ( lowerCamelCase__ = "" ):
"""simple docstring"""
if len(lowerCamelCase__ ) == 0:
return True
lowercase__ : str = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
lowercase__ : dict[str, int] = {}
for character in lower_case_input_str:
lowercase__ : List[Any] = character_freq_dict.get(lowerCamelCase__ , 0 ) + 1
lowercase__ : List[Any] = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def __lowerCamelCase ( lowerCamelCase__ = "" ):
"""simple docstring"""
print("\nFor string = " , lowerCamelCase__ , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(lowerCamelCase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(lowerCamelCase__ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
lowerCAmelCase__ = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
lowerCAmelCase__ = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f'''{check_str} can {"" if status else "not "}be rearranged as a palindrome''')
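# Usage sketch (assumption: standalone, same rule as the counter variant
# above): a string can be rearranged into a palindrome iff at most one
# character occurs an odd number of times.
for _s in ("racecar", "aabbc", "abc"):
    _odd = sum(c % 2 for c in Counter(_s).values())
    print(_s, _odd < 2)  # racecar True, aabbc True, abc False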
| 121
|
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class snake_case__(unittest.TestCase ):
"""simple docstring"""
@property
def snake_case ( self : Any ):
torch.manual_seed(0 )
lowercase__ : Tuple = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
@property
def snake_case ( self : List[str] ):
torch.manual_seed(0 )
lowercase__ : Optional[int] = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , )
return model
@property
def snake_case ( self : Dict ):
torch.manual_seed(0 )
lowercase__ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(SCREAMING_SNAKE_CASE )
def snake_case ( self : str ):
lowercase__ : Any = self.dummy_uncond_unet
lowercase__ : Dict = DDIMScheduler()
lowercase__ : Optional[Any] = self.dummy_vq_model
lowercase__ : Union[str, Any] = LDMPipeline(unet=SCREAMING_SNAKE_CASE , vqvae=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE )
ldm.to(SCREAMING_SNAKE_CASE )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : int = torch.manual_seed(0 )
lowercase__ : Optional[int] = ldm(generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="numpy" ).images
lowercase__ : str = torch.manual_seed(0 )
lowercase__ : List[Any] = ldm(generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="numpy" , return_dict=SCREAMING_SNAKE_CASE )[0]
lowercase__ : Any = image[0, -3:, -3:, -1]
lowercase__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__ : List[Any] = np.array([0.8_512, 0.818, 0.6_411, 0.6_808, 0.4_465, 0.5_618, 0.46, 0.6_231, 0.5_172] )
lowercase__ : Optional[Any] = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
lowercase__ : int = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256" )
ldm.to(SCREAMING_SNAKE_CASE )
ldm.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = torch.manual_seed(0 )
lowercase__ : Tuple = ldm(generator=SCREAMING_SNAKE_CASE , num_inference_steps=5 , output_type="numpy" ).images
lowercase__ : Tuple = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
lowercase__ : Optional[Any] = np.array([0.4_399, 0.44_975, 0.46_825, 0.474, 0.4_359, 0.4_581, 0.45_095, 0.4_341, 0.4_447] )
lowercase__ : int = 1E-2 if torch_device != "mps" else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
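# Concept sketch (assumption: zero arrays stand in for a real generation): the
# pipeline tests above pin a 3x3 corner slice of the output image and compare
# it to hard-coded values within a device-dependent tolerance.
_image = np.zeros((1, 64, 64, 3))
_slice = _image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
assert np.abs(_slice.flatten() - np.zeros(9)).max() < 1e-2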
| 121
| 1
|
"""simple docstring"""
def A_ ( _lowerCAmelCase : str ):
"""simple docstring"""
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 320
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
__snake_case = None
__snake_case = logging.get_logger(__name__)
__snake_case = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
__snake_case = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
__snake_case = {
'''google/rembert''': 256,
}
__snake_case = '''▁'''
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : Optional[Any] = VOCAB_FILES_NAMES
A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : List[Any] = RemBertTokenizer
def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=False , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<unk>" , __UpperCAmelCase="[SEP]" , __UpperCAmelCase="<pad>" , __UpperCAmelCase="[CLS]" , __UpperCAmelCase="[MASK]" , **__UpperCAmelCase , ) -> List[Any]:
# Mask token behave like a normal word, i.e. include the space before it
_a = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
super().__init__(
__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , **__UpperCAmelCase , )
_a = do_lower_case
_a = remove_space
_a = keep_accents
_a = vocab_file
_a = False if not self.vocab_file else True
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = False ) -> List[int]:
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'''You should not supply a second sequence if the provided sequence of '''
'''ids is already formatted with special tokens for the model.''' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase )) + [1]
return [1] + ([0] * len(__UpperCAmelCase )) + [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> List[int]:
_a = [self.sep_token_id]
_a = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error('''Vocabulary path ({}) should be a directory'''.format(__UpperCAmelCase ) )
return
_a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
copyfile(self.vocab_file , __UpperCAmelCase )
return (out_vocab_file,)
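# Shape sketch (assumption: the token ids are made up, not from a real
# vocabulary): for a sequence pair the methods above lay out
#   [CLS] A ... [SEP] B ... [SEP]
# with token_type_ids 0 over the first segment and 1 over the second.
_cls_id, _sep_id = 101, 102  # hypothetical special-token ids
_a_ids, _b_ids = [7, 8], [9]
print([_cls_id] + _a_ids + [_sep_id] + _b_ids + [_sep_id])  # [101, 7, 8, 102, 9, 102]
print([0] * (len(_a_ids) + 2) + [1] * (len(_b_ids) + 1))    # [0, 0, 0, 0, 1, 1]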
| 320
| 1
|
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def A_ ( snake_case_ : Dataset ,snake_case_ : Dict[str, str] ):
'''simple docstring'''
UpperCamelCase : List[str] = args.log_outputs
UpperCamelCase : Tuple = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] )
# load metric
UpperCamelCase : List[Any] = load_metric("""wer""" )
UpperCamelCase : Any = load_metric("""cer""" )
# compute metrics
UpperCamelCase : str = wer.compute(references=result["""target"""] ,predictions=result["""prediction"""] )
UpperCamelCase : Dict = cer.compute(references=result["""target"""] ,predictions=result["""prediction"""] )
# print & log results
UpperCamelCase : Optional[int] = f'WER: {wer_result}\nCER: {cer_result}'
print(snake_case_ )
with open(f'{dataset_id}_eval_results.txt' ,"""w""" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
UpperCamelCase : Optional[Any] = f'log_{dataset_id}_predictions.txt'
UpperCamelCase : str = f'log_{dataset_id}_targets.txt'
with open(snake_case_ ,"""w""" ) as p, open(snake_case_ ,"""w""" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] ,snake_case_ : Tuple ):
p.write(f'{i}' + """\n""" )
p.write(batch["""prediction"""] + """\n""" )
t.write(f'{i}' + """\n""" )
t.write(batch["""target"""] + """\n""" )
result.map(snake_case_ ,with_indices=snake_case_ )
def A_ ( snake_case_ : str ):
'''simple docstring'''
UpperCamelCase : Dict = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
UpperCamelCase : str = re.sub(snake_case_ ,"""""" ,text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
UpperCamelCase : List[str] = ["""\n\n""", """\n""", """ """, """ """]
for t in token_sequences_to_ignore:
UpperCamelCase : Tuple = """ """.join(text.split(snake_case_ ) )
return text
def A_ ( snake_case_ : str ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = load_dataset(args.dataset ,args.config ,split=args.split ,use_auth_token=snake_case_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
UpperCamelCase : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id )
UpperCamelCase : Dict = feature_extractor.sampling_rate
# resample audio
UpperCamelCase : Optional[Any] = dataset.cast_column("""audio""" ,Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
UpperCamelCase : int = 0 if torch.cuda.is_available() else -1
UpperCamelCase : Union[str, Any] = pipeline("""automatic-speech-recognition""" ,model=args.model_id ,device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Union[str, Any] ):
UpperCamelCase : List[Any] = asr(
batch["""audio"""]["""array"""] ,chunk_length_s=args.chunk_length_s ,stride_length_s=args.stride_length_s )
UpperCamelCase : Union[str, Any] = prediction["""text"""]
UpperCamelCase : Optional[Any] = normalize_text(batch["""sentence"""] )
return batch
# run inference on all examples
UpperCamelCase : Any = dataset.map(snake_case_ ,remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ ,snake_case_ )
if __name__ == "__main__":
__A : List[str] = argparse.ArgumentParser()
parser.add_argument(
'''--model_id''', type=str, required=True, help='''Model identifier. Should be loadable with 🤗 Transformers'''
)
parser.add_argument(
'''--dataset''',
type=str,
required=True,
help='''Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets''',
)
parser.add_argument(
'''--config''', type=str, required=True, help='''Config of the dataset. *E.g.* `\'en\'` for Common Voice'''
)
parser.add_argument('''--split''', type=str, required=True, help='''Split of the dataset. *E.g.* `\'test\'`''')
parser.add_argument(
'''--chunk_length_s''', type=float, default=None, help='''Chunk length in seconds. Defaults to 5 seconds.'''
)
parser.add_argument(
'''--stride_length_s''', type=float, default=None, help='''Stride of the audio chunks. Defaults to 1 second.'''
)
parser.add_argument(
'''--log_outputs''', action='''store_true''', help='''If defined, write outputs to log file for analysis.'''
)
parser.add_argument(
'''--device''',
type=int,
default=None,
help='''The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.''',
)
__A : Optional[Any] = parser.parse_args()
main(args)
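# Normalization sketch (assumption: a reduced punctuation set, standalone; the
# script above uses a larger ignore list): targets are lowercased, stripped of
# ignored characters, and have whitespace collapsed before scoring WER/CER.
def _normalize_demo(text):
    text = re.sub(r"[,?.!\-\;\:\"]", "", text.lower())
    return " ".join(text.split())

print(_normalize_demo("Hello,  World!"))  # hello world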
| 370
|
"""simple docstring"""
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def A_ ( snake_case_ : int ): # picklable for multiprocessing
'''simple docstring'''
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def A_ ( ):
'''simple docstring'''
with parallel_backend("""spark""" ):
assert ParallelBackendConfig.backend_name == "spark"
UpperCamelCase : Optional[Any] = [1, 2, 3]
with pytest.raises(snake_case_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(snake_case_ ,snake_case_ ,num_proc=2 )
with pytest.raises(snake_case_ ):
with parallel_backend("""unsupported backend""" ):
map_nested(snake_case_ ,snake_case_ ,num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize("""num_proc""" ,[2, -1] )
def A_ ( snake_case_ : List[str] ):
'''simple docstring'''
UpperCamelCase : List[Any] = [1, 2]
UpperCamelCase : List[Any] = {"""a""": 1, """b""": 2}
UpperCamelCase : List[str] = {"""a""": [1, 2], """b""": [3, 4]}
UpperCamelCase : Tuple = {"""a""": {"""1""": 1}, """b""": 2}
UpperCamelCase : Any = {"""a""": 1, """b""": 2, """c""": 3, """d""": 4}
UpperCamelCase : Optional[int] = [2, 3]
UpperCamelCase : List[str] = {"""a""": 2, """b""": 3}
UpperCamelCase : Any = {"""a""": [2, 3], """b""": [4, 5]}
UpperCamelCase : Tuple = {"""a""": {"""1""": 2}, """b""": 3}
UpperCamelCase : List[str] = {"""a""": 2, """b""": 3, """c""": 4, """d""": 5}
with parallel_backend("""spark""" ):
assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa
assert map_nested(snake_case_ ,snake_case_ ,num_proc=snake_case_ ) == expected_map_nested_sa
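# Concept sketch (assumption: a simplified standalone stand-in for
# `map_nested`): the function is applied through lists and dict values while
# the surrounding structure is preserved.
def _map_nested_demo(fn, data):
    if isinstance(data, dict):
        return {k: _map_nested_demo(fn, v) for k, v in data.items()}
    if isinstance(data, list):
        return [_map_nested_demo(fn, v) for v in data]
    return fn(data)

print(_map_nested_demo(lambda i: i + 1, {"a": [1, 2], "b": {"1": 1}}))
# {'a': [2, 3], 'b': {'1': 2}}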
| 27
| 0
|
"""simple docstring"""
import torch
def _snake_case ( ):
if torch.cuda.is_available():
UpperCAmelCase : int = torch.cuda.device_count()
else:
UpperCAmelCase : Optional[int] = 0
print(F"Successfully ran on {num_gpus} GPUs" )
if __name__ == "__main__":
main()
| 109
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_lowercase : Union[str, Any] = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Dict = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
from .configuration_pixastruct import (
PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
PixaStructConfig,
PixaStructTextConfig,
PixaStructVisionConfig,
)
from .processing_pixastruct import PixaStructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_pixastruct import PixaStructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pixastruct import (
PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
PixaStructForConditionalGeneration,
PixaStructPreTrainedModel,
PixaStructTextModel,
PixaStructVisionModel,
)
else:
import sys
_lowercase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
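# Concept sketch (assumption: a much-simplified stand-in for `_LazyModule`):
# attribute access triggers the real import only when a symbol is first
# requested, which keeps top-level package imports cheap.
import importlib

class _LazyModuleDemo:
    def __init__(self, name_to_module):
        self._name_to_module = name_to_module

    def __getattr__(self, name):
        module = importlib.import_module(self._name_to_module[name])
        return getattr(module, name)

# _LazyModuleDemo({"sqrt": "math"}).sqrt(9.0) -> 3.0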
| 93
| 0
|
'''simple docstring'''
from __future__ import annotations
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('You cannot supply more or less than 2 values' )
elif stress < 0:
raise ValueError('Stress cannot be negative' )
elif tangential_force < 0:
raise ValueError('Tangential Force cannot be negative' )
elif area < 0:
raise ValueError('Area cannot be negative' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
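# Worked example (assumption: SI units; a standalone restatement of the rule
# above): exactly one of (stress, tangential_force, area) is zero and is
# solved for from the other two, e.g. stress = F / A.
_force, _area = 25.0, 0.05  # newtons, square metres
print(("stress", _force / _area))  # ('stress', 500.0) pascals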
| 360
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
lowercase__ = re.compile(r"([A-Z]+)([A-Z][a-z])")
lowercase__ = re.compile(r"([a-z\d])([A-Z])")
lowercase__ = re.compile(r"(?<!_)_(?!_)")
lowercase__ = re.compile(r"(_{2,})")
lowercase__ = r"^\w+(\.\w+)*$"
lowercase__ = r"<>:/\|?*"
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = _uppercase_uppercase_re.sub(R'\1_\2' , UpperCAmelCase_ )
UpperCAmelCase : str = _lowercase_uppercase_re.sub(R'\1_\2' , UpperCAmelCase_ )
return name.lower()
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase : List[str] = _single_underscore_re.split(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = [_multiple_underscores_re.split(UpperCAmelCase_ ) for n in name]
return "".join(n.capitalize() for n in itertools.chain.from_iterable(UpperCAmelCase_ ) if n != '' )
def UpperCamelCase( UpperCAmelCase_ ):
if os.path.basename(UpperCAmelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
return camelcase_to_snakecase(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
if os.path.basename(UpperCAmelCase_ ) != name:
raise ValueError(F"""Should be a dataset name, not a path: {name}""" )
if not re.match(_split_re , UpperCAmelCase_ ):
raise ValueError(F"""Split name should match '{_split_re}' but got '{split}'.""" )
return F"""{filename_prefix_for_name(UpperCAmelCase_ )}-{split}"""
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None ):
UpperCAmelCase : Dict = filename_prefix_for_split(UpperCAmelCase_ , UpperCAmelCase_ )
if filetype_suffix:
prefix += F""".{filetype_suffix}"""
UpperCAmelCase : Optional[int] = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
return F"""{filepath}*"""
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None ):
UpperCAmelCase : Optional[int] = filename_prefix_for_split(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : str = os.path.join(UpperCAmelCase_ , UpperCAmelCase_ )
if shard_lengths:
UpperCAmelCase : List[str] = len(UpperCAmelCase_ )
UpperCAmelCase : List[str] = [F"""{prefix}-{shard_id:05d}-of-{num_shards:05d}""" for shard_id in range(UpperCAmelCase_ )]
if filetype_suffix:
UpperCAmelCase : Dict = [filename + F""".{filetype_suffix}""" for filename in filenames]
return filenames
else:
UpperCAmelCase : Optional[Any] = prefix
if filetype_suffix:
filename += F""".{filetype_suffix}"""
return [filename]
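# Quick usage sketch (added; the example values are made up, POSIX-style paths assumed):
assert camelcase_to_snakecase("SquadV2") == "squad_v2"
assert snakecase_to_camelcase("squad_v2") == "SquadV2"
assert filenames_for_dataset_split("/data", "squad", "train", filetype_suffix="arrow", shard_lengths=[100, 200]) == [
    "/data/squad-train-00000-of-00002.arrow",
    "/data/squad-train-00001-of-00002.arrow",
]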
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput( BaseOutput ):
    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor
class ScoreSdeVeScheduler( SchedulerMixin , ConfigMixin ):
    order = 1
@register_to_config
def __init__( self : str , UpperCamelCase_ : int = 2_0_0_0 , UpperCamelCase_ : float = 0.15 , UpperCamelCase_ : float = 0.01 , UpperCamelCase_ : float = 1_348.0 , UpperCamelCase_ : float = 1E-5 , UpperCamelCase_ : int = 1 , ):
# standard deviation of the initial noise distribution
lowerCAmelCase : Union[str, Any] = sigma_max
# setable values
lowerCAmelCase : Dict = None
self.set_sigmas(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
    def scale_model_input( self : Tuple , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[int] = None ):
return sample
    def set_timesteps( self : Optional[int] , UpperCamelCase_ : int , UpperCamelCase_ : float = None , UpperCamelCase_ : Union[str, torch.device] = None ):
lowerCAmelCase : Optional[Any] = sampling_eps if sampling_eps is not None else self.config.sampling_eps
lowerCAmelCase : Dict = torch.linspace(1 , UpperCamelCase_ , UpperCamelCase_ , device=UpperCamelCase_ )
    def set_sigmas( self : str , UpperCamelCase_ : int , UpperCamelCase_ : float = None , UpperCamelCase_ : float = None , UpperCamelCase_ : float = None ):
lowerCAmelCase : Optional[Any] = sigma_min if sigma_min is not None else self.config.sigma_min
lowerCAmelCase : Union[str, Any] = sigma_max if sigma_max is not None else self.config.sigma_max
lowerCAmelCase : int = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(UpperCamelCase_ , UpperCamelCase_ )
lowerCAmelCase : Optional[int] = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
lowerCAmelCase : Any = torch.exp(torch.linspace(math.log(UpperCamelCase_ ) , math.log(UpperCamelCase_ ) , UpperCamelCase_ ) )
lowerCAmelCase : Dict = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
    def get_adjacent_sigma( self : Union[str, Any] , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] ):
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
    def step_pred( self : List[Any] , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : int , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : bool = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
lowerCAmelCase : Optional[int] = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
lowerCAmelCase : Any = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
lowerCAmelCase : int = timesteps.to(self.discrete_sigmas.device )
lowerCAmelCase : Union[str, Any] = self.discrete_sigmas[timesteps].to(sample.device )
lowerCAmelCase : Any = self.get_adjacent_sigma(UpperCamelCase_ , UpperCamelCase_ ).to(sample.device )
lowerCAmelCase : Union[str, Any] = torch.zeros_like(UpperCamelCase_ )
lowerCAmelCase : str = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
lowerCAmelCase : List[str] = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
lowerCAmelCase : List[Any] = diffusion.unsqueeze(-1 )
lowerCAmelCase : int = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
lowerCAmelCase : str = randn_tensor(
sample.shape , layout=sample.layout , generator=UpperCamelCase_ , device=sample.device , dtype=sample.dtype )
lowerCAmelCase : Optional[Any] = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
lowerCAmelCase : Union[str, Any] = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=UpperCamelCase_ , prev_sample_mean=UpperCamelCase_ )
    def step_correct( self : Any , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : Optional[torch.Generator] = None , UpperCamelCase_ : bool = True , ):
if self.timesteps is None:
raise ValueError(
'''`self.timesteps` is not set, you need to run \'set_timesteps\' after creating the scheduler''' )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
lowerCAmelCase : Union[str, Any] = randn_tensor(sample.shape , layout=sample.layout , generator=UpperCamelCase_ ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
lowerCAmelCase : Tuple = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase : str = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
lowerCAmelCase : Optional[int] = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
lowerCAmelCase : Tuple = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
lowerCAmelCase : Any = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
lowerCAmelCase : str = step_size.unsqueeze(-1 )
lowerCAmelCase : Optional[Any] = sample + step_size * model_output
lowerCAmelCase : List[str] = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase_ )
    def add_noise( self : str , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , UpperCamelCase_ : torch.FloatTensor , ):
# Make sure sigmas and timesteps have the same device and dtype as original_samples
lowerCAmelCase : List[Any] = timesteps.to(original_samples.device )
lowerCAmelCase : Optional[int] = self.discrete_sigmas.to(original_samples.device )[timesteps]
lowerCAmelCase : str = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(UpperCamelCase_ ) * sigmas[:, None, None, None]
)
lowerCAmelCase : int = noise + original_samples
return noisy_samples
def __len__( self : Tuple ):
return self.config.num_train_timesteps
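# Standalone sketch (added) of the geometric sigma schedule that set_sigmas builds
# above: sigma(t) = sigma_min * (sigma_max / sigma_min) ** t on descending timesteps.
# The constants mirror the scheduler defaults but are otherwise arbitrary.
_demo_timesteps = torch.linspace(1, 1E-5, 5)
_demo_sigmas = 0.01 * (1_348.0 / 0.01) ** _demo_timesteps
assert _demo_sigmas[0] > _demo_sigmas[-1]  # decays from ~sigma_max toward ~sigma_min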
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Union[str, Any] = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class __magic_name__ ( lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE = 'ctrl'
SCREAMING_SNAKE_CASE = ['past_key_values']
SCREAMING_SNAKE_CASE = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __snake_case=24_6534 , __snake_case=256 , __snake_case=1280 , __snake_case=8192 , __snake_case=48 , __snake_case=16 , __snake_case=0.1 , __snake_case=0.1 , __snake_case=1e-6 , __snake_case=0.02 , __snake_case=True , **__snake_case , ) -> Union[str, Any]:
'''simple docstring'''
__a =vocab_size
__a =n_positions
__a =n_embd
__a =n_layer
__a =n_head
__a =dff
__a =resid_pdrop
__a =embd_pdrop
__a =layer_norm_epsilon
__a =initializer_range
__a =use_cache
super().__init__(**__snake_case )
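# Usage sketch (added; assumes the released `transformers` package, whose public
# CTRLConfig the class above mirrors): attribute_map lets generic names resolve
# to the CTRL-specific ones.
from transformers import CTRLConfig
_demo_cfg = CTRLConfig(n_embd=1280, n_layer=48)
assert _demo_cfg.hidden_size == 1280 and _demo_cfg.num_hidden_layers == 48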
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k" )
    return factorial(n ) // (factorial(k ) * factorial(n - k ))
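# Worked check (added): C(5, 2) = 5! / (2! * 3!) = 10.
assert combinations(5, 2) == 10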
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'If a class of 40 students must be arranged into groups of',
f"4 for group projects, there are {combinations(40, 4)} ways",
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f"are {combinations(10, 3)} ways that first, second and",
'third place can be awarded.',
)
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = 'src/diffusers'
REPO_PATH = '.'
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'diffusers',
    os.path.join(DIFFUSERS_PATH, '__init__.py'),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers_module = spec.loader.load_module()
def _should_continue(line, indent ):
    return line.startswith(indent ) or len(line ) <= 1 or re.search(R"^\s*\)(\s*->.*:|:)\s*$", line ) is not None
def find_code_in_diffusers(object_name ):
    parts = object_name.split("." )
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts ) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"""{module}.py""" ) ):
        i += 1
        if i < len(parts ):
            module = os.path.join(module, parts[i] )
    if i >= len(parts ):
        raise ValueError(f"""`object_name` should begin with the name of a module of diffusers but got {object_name}.""" )
    with open(os.path.join(DIFFUSERS_PATH, f"""{module}.py""" ), "r", encoding="utf-8", newline="\n" ) as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines ) and re.search(Rf"""^{indent}(class|def)\s+{name}(\(|\:)""", lines[line_index] ) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines ):
        raise ValueError(f""" {object_name} does not match any function or class in {module}.""" )
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines ) and _should_continue(lines[line_index], indent ):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1] ) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines )
_re_copy_warning = re.compile(R'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
_re_replace_pattern = re.compile(R'^\s*(\S+)->(\S+)(\s+.*|$)')
_re_fill_pattern = re.compile(R'<FILL\s+[^>]*>')
def get_indent(code ):
    lines = code.split("\n" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"^(\s*)\S", lines[idx] ).groups()[0]
    return ""
def blackify(code ):
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = f"""class Bla:\n{code}"""
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True )
    result = black.format_str(code, mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("class Bla:\n" ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False ):
with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f:
_SCREAMING_SNAKE_CASE : int = f.readlines()
_SCREAMING_SNAKE_CASE : Dict = []
_SCREAMING_SNAKE_CASE : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : Union[str, Any] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = search.groups()
_SCREAMING_SNAKE_CASE : Any = find_code_in_diffusers(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_indent(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = line_index + 1 if indent == theoretical_indent else line_index + 2
_SCREAMING_SNAKE_CASE : int = theoretical_indent
_SCREAMING_SNAKE_CASE : str = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
_SCREAMING_SNAKE_CASE : Any = True
while line_index < len(__lowerCamelCase ) and should_continue:
line_index += 1
if line_index >= len(__lowerCamelCase ):
break
_SCREAMING_SNAKE_CASE : Union[str, Any] = lines[line_index]
_SCREAMING_SNAKE_CASE : str = _should_continue(__lowerCamelCase, __lowerCamelCase ) and re.search(f"""^{indent}# End copy""", __lowerCamelCase ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
_SCREAMING_SNAKE_CASE : List[Any] = lines[start_index:line_index]
_SCREAMING_SNAKE_CASE : Optional[Any] = "".join(__lowerCamelCase )
# Remove any nested `Copied from` comments to avoid circular copies
_SCREAMING_SNAKE_CASE : Dict = [line for line in theoretical_code.split("\n" ) if _re_copy_warning.search(__lowerCamelCase ) is None]
_SCREAMING_SNAKE_CASE : str = "\n".join(__lowerCamelCase )
# Before comparing, use the `replace_pattern` on the original code.
if len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : str = replace_pattern.replace("with", "" ).split("," )
_SCREAMING_SNAKE_CASE : Union[str, Any] = [_re_replace_pattern.search(__lowerCamelCase ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = pattern.groups()
_SCREAMING_SNAKE_CASE : Tuple = re.sub(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if option.strip() == "all-casing":
_SCREAMING_SNAKE_CASE : List[Any] = re.sub(obja.lower(), obja.lower(), __lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = re.sub(obja.upper(), obja.upper(), __lowerCamelCase )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
_SCREAMING_SNAKE_CASE : int = blackify(lines[start_index - 1] + theoretical_code )
_SCREAMING_SNAKE_CASE : List[str] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
_SCREAMING_SNAKE_CASE : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
_SCREAMING_SNAKE_CASE : int = start_index + 1
if overwrite and len(__lowerCamelCase ) > 0:
# Warn the user a file has been modified.
print(f"""Detected changes, rewriting {filename}.""" )
with open(__lowerCamelCase, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(__lowerCamelCase )
return diffs
def check_copies(overwrite = False ):
_SCREAMING_SNAKE_CASE : int = glob.glob(os.path.join(__lowerCamelCase, "**/*.py" ), recursive=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = []
for filename in all_files:
_SCREAMING_SNAKE_CASE : int = is_copy_consistent(__lowerCamelCase, __lowerCamelCase )
diffs += [f"""- {filename}: copy does not match {d[0]} at line {d[1]}""" for d in new_diffs]
if not overwrite and len(__lowerCamelCase ) > 0:
_SCREAMING_SNAKE_CASE : Dict = "\n".join(__lowerCamelCase )
raise Exception(
"Found the following copy inconsistencies:\n"
+ diff
+ "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them." )
if __name__ == "__main__":
UpperCamelCase__ =argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ =parser.parse_args()
check_copies(args.fix_and_overwrite)
import math
import flax.linen as nn
import jax.numpy as jnp
def get_sinusoidal_embeddings( timesteps , embedding_dim , freq_shift = 1 , min_timescale = 1 , max_timescale = 1.0E4 , flip_sin_to_cos = False , scale = 1.0 , ) -> jnp.ndarray:
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even"""
    num_timescales = float(embedding_dim // 2 )
    log_timescale_increment = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales , dtype=jnp.float32 ) * -log_timescale_increment )
    emb = jnp.expand_dims(timesteps , 1 ) * jnp.expand_dims(inv_timescales , 0 )
    # scale embeddings
    scaled_time = scale * emb
    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time ), jnp.sin(scaled_time )] , axis=1 )
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time ), jnp.cos(scaled_time )] , axis=1 )
    signal = jnp.reshape(signal , [jnp.shape(timesteps )[0], embedding_dim] )
    return signal
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32
@nn.compact
def __call__( self : str , lowerCAmelCase_ : int ):
"""simple docstring"""
_A: Optional[int] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(lowerCAmelCase_ )
_A: Union[str, Any] = nn.silu(lowerCAmelCase_ )
_A: Tuple = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(lowerCAmelCase_ )
return temb
class UpperCAmelCase ( nn.Module ):
'''simple docstring'''
    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1
@nn.compact
def __call__( self : Dict , lowerCAmelCase_ : Dict ):
"""simple docstring"""
return get_sinusoidal_embeddings(
lowerCAmelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
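# Shape sanity check (added sketch; requires jax): a batch of 4 timesteps embedded
# into 32 dimensions yields a (4, 32) array of interleaved sin/cos features.
_demo_emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
assert _demo_emb.shape == (4, 32)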
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
UpperCAmelCase__ : List[Any] = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch( prophetnet_checkpoint_path , pytorch_dump_folder_path ) -> Dict:
if "xprophetnet" in prophetnet_checkpoint_path:
_A: List[Any] = XLMProphetNetForConditionalGenerationOld.from_pretrained(a )
_A , _A: Union[str, Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
a , output_loading_info=a )
else:
_A: Dict = ProphetNetForConditionalGenerationOld.from_pretrained(a )
_A , _A: Tuple = ProphetNetForConditionalGeneration.from_pretrained(
a , output_loading_info=a )
_A: Optional[int] = ['''key_proj''', '''value_proj''', '''query_proj''']
_A: List[Any] = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
_A: List[str] = key.split('''.''' )
if attributes[0] == "lm_head":
_A: Optional[int] = prophet
_A: Tuple = prophet_old
else:
_A: Tuple = prophet.prophetnet
_A: Any = prophet_old.model
_A: int = False
for attribute in attributes:
if attribute in mapping:
_A: Optional[int] = mapping[attribute]
if not hasattr(a , a ) and len(a ) > 0:
_A: int = attribute
elif hasattr(a , a ):
_A: Tuple = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_A: Union[str, Any] = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
_A: Any = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_A: str = old_model.bias
logger.info(f"""{attribute} is initialized""" )
_A: Dict = True
break
elif attribute in special_keys and hasattr(a , '''in_proj_weight''' ):
_A: Optional[int] = old_model.in_proj_weight.shape[0] // 3
_A: Tuple = getattr(a , a )
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_A: List[str] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_A: List[Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_A: int = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_A: Optional[int] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_A: List[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_A: int = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_A: Tuple = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_12, "We want 512 position_embeddings."
_A: Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:5_12, :] )
_A: List[Any] = True
break
if attribute.isdigit():
_A: Tuple = model[int(a )]
_A: int = old_model[int(a )]
else:
_A: Union[str, Any] = getattr(a , a )
if old_attribute == "":
_A: Union[str, Any] = old_model
else:
if not hasattr(a , a ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
_A: List[Any] = getattr(a , a )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(a )
if __name__ == "__main__":
UpperCAmelCase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCAmelCase__ : Tuple = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
'''simple docstring'''
def prime_sieve_eratosthenes(num):
    if num <= 0:
        raise ValueError('''Input must be a positive integer''')
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
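# Worked check (added): primes up to 30.
assert prime_sieve_eratosthenes(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]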
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase : Tuple = int(input("""Enter a positive integer: """).strip())
print(prime_sieve_eratosthenes(user_num))
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
def _snake_case (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
__lowerCAmelCase = dict(zip(__lowercase , range(len(__lowercase ) ) ) )
__lowerCAmelCase = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
__lowerCAmelCase = {'''unk_token''': '''[UNK]'''}
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__lowercase ) )
def _snake_case (self , **__lowercase ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__lowercase )
def _snake_case (self , __lowercase ):
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = '''lower newer'''
return input_text, output_text
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCAmelCase = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer('''Hello''' , '''World''' )
__lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __lowercase )
@slow
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _snake_case (self ):
__lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
__lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase )
__lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']]
# fmt: off
__lowerCAmelCase = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __lowercase )
for expected, decoded in zip(__lowercase , __lowercase ):
self.assertEqual(__lowercase , __lowercase )
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( lowerCAmelCase_ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'FlavaImageProcessor'
lowercase = ('BertTokenizer', 'BertTokenizerFast')
def __init__( self : Optional[Any] , a : List[Any]=None , a : int=None , **a : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , __a , )
lowerCAmelCase__ : int = kwargs.pop('feature_extractor' )
lowerCAmelCase__ : Union[str, Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(__a , __a )
lowerCAmelCase__ : Optional[int] = self.image_processor
def __call__( self : Tuple , a : int = None , a : Union[str, Any] = None , a : Optional[Any] = True , a : Tuple = False , a : Optional[int] = False , a : List[str] = None , a : Tuple = 0 , a : Optional[int] = None , a : Optional[int] = None , a : Optional[int] = None , a : List[Any] = None , a : Dict = None , a : int = False , a : str = False , a : Any = False , a : Dict = False , a : int = True , a : str = None , **a : List[Any] , ):
'''simple docstring'''
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
lowerCAmelCase__ : Union[str, Any] = self.tokenizer(
text=__a , add_special_tokens=__a , padding=__a , truncation=__a , max_length=__a , stride=__a , pad_to_multiple_of=__a , return_token_type_ids=__a , return_attention_mask=__a , return_overflowing_tokens=__a , return_special_tokens_mask=__a , return_offsets_mapping=__a , return_length=__a , verbose=__a , return_tensors=__a , **__a , )
if images is not None:
lowerCAmelCase__ : Optional[Any] = self.image_processor(
__a , return_image_mask=__a , return_codebook_pixels=__a , return_tensors=__a , **__a , )
if text is not None and images is not None:
encoding.update(__a )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def _lowerCamelCase ( self : Optional[Any] , *a : int , **a : str ):
'''simple docstring'''
return self.tokenizer.batch_decode(*__a , **__a )
def _lowerCamelCase ( self : Dict , *a : Any , **a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*__a , **__a )
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.tokenizer.model_input_names
lowerCAmelCase__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , __a , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , __a , )
return self.image_processor
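# Usage sketch (added; assumes the released `transformers` package, whose public
# FlavaProcessor the class above mirrors, plus network access to the Hugging Face
# Hub for the public "facebook/flava-full" checkpoint):
# from transformers import FlavaProcessor
# processor = FlavaProcessor.from_pretrained("facebook/flava-full")  # tokenizer + image processor
# inputs = processor(text=["a photo of a cat"], return_tensors="pt")  # BatchEncoding with input_ids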
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase : str = {
'configuration_data2vec_audio': ['DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Data2VecAudioConfig'],
'configuration_data2vec_text': [
'DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecTextConfig',
'Data2VecTextOnnxConfig',
],
'configuration_data2vec_vision': [
'DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Data2VecVisionConfig',
'Data2VecVisionOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase : str = [
'DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecAudioForAudioFrameClassification',
'Data2VecAudioForCTC',
'Data2VecAudioForSequenceClassification',
'Data2VecAudioForXVector',
'Data2VecAudioModel',
'Data2VecAudioPreTrainedModel',
]
__lowercase : Tuple = [
'DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecTextForCausalLM',
'Data2VecTextForMaskedLM',
'Data2VecTextForMultipleChoice',
'Data2VecTextForQuestionAnswering',
'Data2VecTextForSequenceClassification',
'Data2VecTextForTokenClassification',
'Data2VecTextModel',
'Data2VecTextPreTrainedModel',
]
__lowercase : Dict = [
'DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST',
'Data2VecVisionForImageClassification',
'Data2VecVisionForMaskedImageModeling',
'Data2VecVisionForSemanticSegmentation',
'Data2VecVisionModel',
'Data2VecVisionPreTrainedModel',
]
if is_tf_available():
__lowercase : Optional[Any] = [
'TFData2VecVisionForImageClassification',
'TFData2VecVisionForSemanticSegmentation',
'TFData2VecVisionModel',
'TFData2VecVisionPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )
if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
__lowercase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    """simple docstring"""
    latents: torch.FloatTensor
class _UpperCAmelCase ( snake_case_ , snake_case_ ):
"""simple docstring"""
@register_to_config
def __init__( self : int , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 3 , __UpperCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , __UpperCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , __UpperCAmelCase : Tuple[int] = (64,) , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "silu" , __UpperCAmelCase : int = 3 , __UpperCAmelCase : int = 32 , __UpperCAmelCase : int = 256 , __UpperCAmelCase : int = 32 , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : float = 0.18215 , __UpperCAmelCase : str = "group" , ):
'''simple docstring'''
super().__init__()
# pass init params to Encoder
_A = Encoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , down_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , double_z=__UpperCAmelCase , )
_A = vq_embed_dim if vq_embed_dim is not None else latent_channels
        _A = nn.Conv2d(__UpperCAmelCase , __UpperCAmelCase , 1 )
_A = VectorQuantizer(__UpperCAmelCase , __UpperCAmelCase , beta=0.25 , remap=__UpperCAmelCase , sane_index_shape=__UpperCAmelCase )
        _A = nn.Conv2d(__UpperCAmelCase , __UpperCAmelCase , 1 )
# pass init params to Decoder
_A = Decoder(
in_channels=__UpperCAmelCase , out_channels=__UpperCAmelCase , up_block_types=__UpperCAmelCase , block_out_channels=__UpperCAmelCase , layers_per_block=__UpperCAmelCase , act_fn=__UpperCAmelCase , norm_num_groups=__UpperCAmelCase , norm_type=__UpperCAmelCase , )
@apply_forward_hook
    def encode( self : List[Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ):
'''simple docstring'''
_A = self.encoder(__UpperCAmelCase )
_A = self.quant_conv(__UpperCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__UpperCAmelCase )
@apply_forward_hook
    def decode( self : str , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = False , __UpperCAmelCase : bool = True ):
'''simple docstring'''
if not force_not_quantize:
_A , _A , _A = self.quantize(__UpperCAmelCase )
else:
_A = h
_A = self.post_quant_conv(__UpperCAmelCase )
_A = self.decoder(__UpperCAmelCase , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
    def forward( self : List[Any] , __UpperCAmelCase : torch.FloatTensor , __UpperCAmelCase : bool = True ):
'''simple docstring'''
_A = sample
_A = self.encode(__UpperCAmelCase ).latents
_A = self.decode(__UpperCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__UpperCAmelCase )
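# Round-trip usage sketch (added; assumes the released `diffusers` package, whose
# public VQModel this class mirrors; the input size is arbitrary):
from diffusers import VQModel
_demo_vq = VQModel()  # default config: one down block, one up block, 3 latent channels
_demo_recon = _demo_vq(torch.randn(1, 3, 32, 32)).sample  # decoded reconstruction
assert _demo_recon.shape == (1, 3, 32, 32)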
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase_ = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase_ = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch( a , a ) -> Optional[Any]:
__A : Any = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 10_24,
'hidden_size': 7_68,
'max_length': 5_12,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 10_24,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__A : str = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__A : Optional[int] = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=a , output_all_encodings=a , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , a ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__A : Union[str, Any] = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__A : Any = os.path.join(get_home_dir() , 'models' )
__A : List[Any] = _load_vocab(a , a , a , cls=a )
__A : Dict = nlp.model.BERTModel(
a , len(a ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=a , use_token_type_embed=a , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=a , use_decoder=a , )
original_bort.load_parameters(a , cast_dtype=a , ignore_extra=a )
__A : Union[str, Any] = original_bort._collect_params_with_prefix()
# Build our config 🤗
__A : Any = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(a ),
}
__A : int = BertConfig.from_dict(a )
__A : Union[str, Any] = BertForMaskedLM(a )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
        assert (
            shape_hf == shape_gluon
        ), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
        return gluon_param
__A : str = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__A : List[str] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__A : Tuple = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__A : Tuple = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__A : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__A : BertSelfAttention = layer.attention.self
__A : Optional[Any] = check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
__A : Optional[Any] = check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
__A : Union[str, Any] = check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
__A : Optional[int] = check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
__A : BertSelfOutput = layer.attention.output
__A : Tuple = check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
__A : int = check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
__A : List[Any] = check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
__A : str = check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
__A : BertIntermediate = layer.intermediate
__A : int = check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
__A : List[Any] = check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
__A : BertOutput = layer.output
__A : List[Any] = check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
__A : Dict = check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
__A : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
__A : Dict = check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__A : Any = RobertaTokenizer.from_pretrained('roberta-base' )
__A : List[str] = tokenizer.encode_plus(a )['input_ids']
# Get gluon output
__A : List[str] = mx.nd.array([input_ids] )
__A : Union[str, Any] = original_bort(inputs=a , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(a )
__A : Optional[Any] = BertModel.from_pretrained(a )
hf_bort_model.eval()
__A : Tuple = tokenizer.encode_plus(a , return_tensors='pt' )
__A : Any = hf_bort_model(**a )[0]
__A : Union[str, Any] = output_gluon[0].asnumpy()
__A : Tuple = output_hf[0].detach().numpy()
__A : int = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__A : int = np.allclose(a , a , atol=1e-3 )
if success:
print('✔️ Both model do output the same tensors' )
else:
print('❌ Both model do **NOT** output the same tensors' )
print('Absolute difference is:' , a )
if __name__ == "__main__":
UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
UpperCAmelCase : Dict = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def lowercase_ ( __UpperCAmelCase ) -> Callable:
"""simple docstring"""
@wraps(lowercase__ )
def _inner_fn(*__UpperCAmelCase , **__UpperCAmelCase ):
warnings.warn(
(f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , lowercase__ , )
return fn(*lowercase__ , **lowercase__ )
return _inner_fn
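if __name__ == "__main__":
    # Minimal usage sketch (the function `fetch_data` is a hypothetical
    # example, not part of the module): calling the decorated function
    # emits a UserWarning naming it, then runs the body normally.
    @experimental
    def fetch_data() -> int:
        return 42

    print(fetch_data())  # warns: "'fetch_data' is experimental ...", then prints 42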
"""simple docstring"""
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
return "\n".join(
f"""{number} * {i} = {number * i}""" for i in range(1 , number_of_terms + 1 ) )
if __name__ == "__main__":
print(multiplication_table(number=5, number_of_terms=1_0))
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
"""The number of five-card hands possible from a standard""",
F'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"""If a class of 40 students must be arranged into groups of""",
F'''4 for group projects, there are {combinations(40, 4)} ways''',
"""to arrange them.\n""",
)
print(
"""If 10 teams are competing in a Formula One race, there""",
F'''are {combinations(10, 3)} ways that first, second and''',
"""third place can be awarded.""",
)
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using trial division by odd numbers."""
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    """Return the next prime after factor * value (searching downward with desc=True)."""
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
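if __name__ == "__main__":
    # Usage sketch: next_prime scans from factor * value until is_prime
    # succeeds; if the start is already prime it recurses from the next
    # integer, so the result is always strictly larger than the start.
    print(next_prime(10))  # 11
    print(next_prime(4, factor=3))  # 12 -> 13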
import random
import unittest
import torch
from diffusers import IFInpaintingSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate, preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant schedule from a rule string such as "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, lr_str = rule_str.split(":")
        steps = int(value_str)
        lr_multiple = float(lr_str)
        rules_dict[steps] = lr_multiple
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay of the learning rate to 0."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay of the learning rate to 0."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine schedule with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the initial lr down to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Unified helper that builds any of the schedules above from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
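if __name__ == "__main__":
    # Minimal usage sketch (assumes only torch is installed): build a
    # linear warmup/decay schedule for a throwaway parameter and step it a
    # few times; during warmup the lr returned by the LambdaLR climbs
    # linearly toward the optimizer's base lr.
    import torch

    param = torch.nn.Parameter(torch.zeros(1))
    optim = torch.optim.AdamW([param], lr=1e-3)
    sched = get_scheduler("linear", optim, num_warmup_steps=10, num_training_steps=100)
    for _ in range(3):
        optim.step()
        sched.step()
    print(sched.get_last_lr())  # still climbing through warmup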
"""Sum the digits in the decimal expansion of n! (Project Euler problem 20 for n = 100)."""
from math import factorial


def solution(num: int = 100) -> int:
    """Return the sum of the digits of num!."""
    return sum(int(x) for x in str(factorial(num)))
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
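# Sketch of the lazy-import effect (assuming this file lives at
# transformers/models/vit_msn/__init__.py): the module object is swapped
# for a _LazyModule, so an import such as
#     from transformers.models.vit_msn import ViTMSNModel
# only triggers the heavy torch-backed submodule import the first time
# the attribute is actually accessed.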
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING

    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")

        # convert model to fp16
        pipe.model.half()

        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
    def run_test_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        targets = sorted(vocab.keys())[:2]
        # Pipeline argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, targets=targets)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Call argument
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        target_ids = {vocab[el] for el in targets}
        self.assertEqual({el["token"] for el in outputs}, target_ids)
        processed_targets = [tokenizer.decode([x]) for x in target_ids]
        self.assertEqual({el["token_str"] for el in outputs}, set(processed_targets))

        # Score equivalence
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=targets)
        tokens = [top_mask["token_str"] for top_mask in outputs]
        scores = [top_mask["score"] for top_mask in outputs]

        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets) == set(tokens):
            unmasked_targets = fill_masker(f"This is a {tokenizer.mask_token}", targets=tokens)
            target_scores = [top_mask["score"] for top_mask in unmasked_targets]
            self.assertEqual(nested_simplify(scores), nested_simplify(target_scores))

        # Raises with invalid
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
        if "" not in tokenizer.get_vocab():
            with self.assertRaises(ValueError):
                outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets=[""])
        with self.assertRaises(ValueError):
            outputs = fill_masker(f"This is a {tokenizer.mask_token}", targets="")
    def run_test_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer, top_k=2)
        outputs = fill_masker(f"This is a {tokenizer.mask_token}")
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2)
        self.assertEqual(
            outputs2,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )
        self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def run_test_top_k_targets(self, model, tokenizer):
        vocab = tokenizer.get_vocab()
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        # top_k=2, ntargets=3
        targets = sorted(vocab.keys())[:3]
        outputs = fill_masker(f"This is a {tokenizer.mask_token}", top_k=2, targets=targets)

        # If we use the most probable targets, and filter differently, we should still
        # have the same results
        targets2 = [el["token_str"] for el in sorted(outputs, key=lambda x: x["score"], reverse=True)]
        # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
        if set(targets2).issubset(vocab):
            outputs2 = fill_masker(f"This is a {tokenizer.mask_token}", top_k=3, targets=targets2)
            # They should yield exactly the same result
            self.assertEqual(nested_simplify(outputs), nested_simplify(outputs2))
    def fill_mask_with_duplicate_targets_and_top_k(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        vocab = tokenizer.get_vocab()
        # String duplicates + id duplicates
        targets = sorted(vocab.keys())[:3]
        targets = [targets[0], targets[1], targets[0], targets[2], targets[1]]
        outputs = fill_masker(f"My name is {tokenizer.mask_token}", targets=targets, top_k=10)

        # The target list contains duplicates, so we can't output more
        # than them
        self.assertEqual(len(outputs), 3)
    def fill_mask_with_multiple_masks(self, model, tokenizer):
        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}", top_k=2
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )
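# Minimal usage sketch outside the test harness, with the same checkpoint
# the slow tests above exercise; each prediction dict carries the
# "sequence", "score", "token" and "token_str" keys asserted above:
#
#     from transformers import pipeline
#     unmasker = pipeline("fill-mask", model="distilroberta-base", top_k=2)
#     print(unmasker("The largest city in France is <mask>"))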
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
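# Usage sketch with a released checkpoint rather than the tiny fixture
# built in setUp above ("openai/clip-vit-base-patch32" is assumed to be
# available; `my_pil_image` is a placeholder PIL image):
#
#     from transformers import CLIPProcessor
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     batch = processor(text=["a cat"], images=my_pil_image, return_tensors="pt")
#     # batch holds input_ids, attention_mask and pixel_values ready for CLIPModel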
from __future__ import annotations
from math import pow, sqrt
def electrical_impedance(resistance: float, reactance: float, impedance: float) -> dict[str, float]:
    """Solve |Z|^2 = R^2 + X^2 for whichever one of the three arguments is 0."""
    if (resistance, reactance, impedance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance, 2) - pow(reactance, 2))}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance, 2) - pow(resistance, 2))}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance, 2) + pow(reactance, 2))}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
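# Usage sketch: with R = 3 and X = 4 (a 3-4-5 triangle of values) the
# impedance magnitude is 5, and either missing leg is recovered the same way.
#     electrical_impedance(3, 4, 0)  -> {'impedance': 5.0}
#     electrical_impedance(0, 4, 5)  -> {'resistance': 3.0}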
'''Detect a cycle in a directed graph using depth-first search.'''


def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph (an adjacency dict) contains a cycle."""
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """Recur for all neighbours; a neighbour already on the stack marks a back edge."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
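# Usage sketch: 0 -> 1 -> 2 -> 0 closes a loop, so check_cycle reports True;
# dropping the 2 -> 0 edge makes the graph acyclic.
#     check_cycle({0: [1], 1: [2], 2: [0]})  -> True
#     check_cycle({0: [1], 1: [2], 2: []})   -> False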
"""simple docstring"""
import numpy as np
import skfuzzy as fuzz
if __name__ == "__main__":
# Create universe of discourse in Python using linspace ()
__UpperCamelCase = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)
# Create two fuzzy sets by defining any membership function
# (trapmf(), gbellmf(), gaussmf(), etc).
__UpperCamelCase = [0, 25, 50]
__UpperCamelCase = [25, 50, 75]
__UpperCamelCase = fuzz.membership.trimf(X, abca)
__UpperCamelCase = fuzz.membership.trimf(X, abca)
# Compute the different operations using inbuilt functions.
__UpperCamelCase = np.ones(75)
__UpperCamelCase = np.zeros((75,))
# 1. Union = max(µA(x), µB(x))
__UpperCamelCase = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
# 2. Intersection = min(µA(x), µB(x))
__UpperCamelCase = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
# 3. Complement (A) = (1- min(µA(x))
__UpperCamelCase = fuzz.fuzzy_not(young)
# 4. Difference (A/B) = min(µA(x),(1- µB(x)))
__UpperCamelCase = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged)[1])[1]
# 5. Algebraic Sum = [µA(x) + µB(x) – (µA(x) * µB(x))]
__UpperCamelCase = young + middle_aged - (young * middle_aged)
# 6. Algebraic Product = (µA(x) * µB(x))
__UpperCamelCase = young * middle_aged
# 7. Bounded Sum = min[1,(µA(x), µB(x))]
__UpperCamelCase = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
# 8. Bounded difference = min[0,(µA(x), µB(x))]
__UpperCamelCase = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]
# max-min composition
# max-product composition
# Plot each set A, set B and each operation result using plot() and subplot().
from matplotlib import pyplot as plt
plt.figure()
plt.subplot(4, 3, 1)
plt.plot(X, young)
plt.title('''Young''')
plt.grid(True)
plt.subplot(4, 3, 2)
plt.plot(X, middle_aged)
plt.title('''Middle aged''')
plt.grid(True)
plt.subplot(4, 3, 3)
plt.plot(X, union)
plt.title('''union''')
plt.grid(True)
plt.subplot(4, 3, 4)
plt.plot(X, intersection)
plt.title('''intersection''')
plt.grid(True)
plt.subplot(4, 3, 5)
plt.plot(X, complement_a)
plt.title('''complement_a''')
plt.grid(True)
plt.subplot(4, 3, 6)
plt.plot(X, difference)
plt.title('''difference a/b''')
plt.grid(True)
plt.subplot(4, 3, 7)
plt.plot(X, alg_sum)
plt.title('''alg_sum''')
plt.grid(True)
plt.subplot(4, 3, 8)
plt.plot(X, alg_product)
plt.title('''alg_product''')
plt.grid(True)
plt.subplot(4, 3, 9)
plt.plot(X, bdd_sum)
plt.title('''bdd_sum''')
plt.grid(True)
plt.subplot(4, 3, 10)
plt.plot(X, bdd_difference)
plt.title('''bdd_difference''')
plt.grid(True)
plt.subplots_adjust(hspace=0.5)
plt.show()
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def t5x_relpos_bias_lookup(params, i, prefix):
    """Return the relative position bias parameters of a layer. Does not transpose."""
    return params[f"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]


def t5x_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the KOQV parameters of (self-)attention. Does not transpose."""
    k_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :])
    k = k_tmp.reshape(k_tmp.shape[0], k_tmp.shape[1] * k_tmp.shape[2])
    o_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :])
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1], o_tmp.shape[2])
    q_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :])
    q = q_tmp.reshape(q_tmp.shape[0], q_tmp.shape[1] * q_tmp.shape[2])
    v_tmp = np.ascontiguousarray(params[f"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :])
    v = v_tmp.reshape(v_tmp.shape[0], v_tmp.shape[1] * v_tmp.shape[2])
    return k, o, q, v


def t5x_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the MLP parameters of a layer. Does not transpose."""
    if split_mlp_wi:
        wi_0 = params[f"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[f"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[f"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[f"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
    return wi, wo


def t5x_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer norm param of a layer."""
    return params[f"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch(
    variables: dict, *, num_layers: int, is_encoder_only: bool, scalable_attention: bool = False
):
    """Convert the parameters from a T5X-Flax checkpoint into a PyTorch-style mapping."""
    old = traverse_util.flatten_dict(variables["target"])
    old = {"/".join(k): v for k, v in old.items()}

    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = "encoder/encoder/mlp/wi_0/kernel" in old
    print("Split MLP:", split_mlp_wi)

    new = collections.OrderedDict()

    # Shared embeddings.
    new["shared.weight"] = old["token_embedder/embedding"]

    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_attention_layer_norm")
        k, o, q, v = t5x_attention_lookup(old, i, "encoder", "attention")
        new[f"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[f"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[f"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old, i, "encoder", "pre_mlp_layer_norm")
        wi, wo = t5x_mlp_lookup(old, i, "encoder", split_mlp_wi)
        new[f"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[f"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[f"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[f"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old, i, "encoder"
            ).T

    new["encoder.final_layer_norm.weight"] = old["encoder/encoder_norm/scale"]

    if not scalable_attention:
        new["encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "encoder"
        ).T
        new["decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
            old, 0, "decoder"
        ).T

    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_self_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "self_attention")
            new[f"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T

            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_cross_attention_layer_norm")
            k, o, q, v = t5x_attention_lookup(old, i, "decoder", "encoder_decoder_attention")
            new[f"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[f"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[f"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T

            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old, i, "decoder", "pre_mlp_layer_norm")
            wi, wo = t5x_mlp_lookup(old, i, "decoder", split_mlp_wi)
            new[f"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[f"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[f"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[f"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = (
                    t5x_relpos_bias_lookup(old, i, "decoder").T
                )

        new["decoder.final_layer_norm.weight"] = old["decoder/decoder_norm/scale"]

        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new["lm_head.weight"] = old["decoder/logits_dense/kernel"].T

    return new
def make_state_dict(converted_params, is_encoder_only: bool):
    """Prepare a state dict for the PyTorch model."""
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])

    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]

    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]

        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print("Using shared word embeddings as lm_head.")
            state_dict["lm_head.weight"] = state_dict["shared.weight"]

    return state_dict


def load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention):
    """Replace the params of the PyTorch model with the converted T5X ones."""
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    converted = convert_t5x_to_pytorch(
        variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only, scalable_attention=scalable_attention
    )
    state_dict = make_state_dict(converted, is_encoder_only)
    model.load_state_dict(state_dict, strict=True)


def convert_t5x_checkpoint_to_pytorch(
    t5x_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only: bool = False, scalable_attention: bool = False
):
    """Load a T5X checkpoint into a freshly built PyTorch model, then save it."""
    config = MT5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config)
    else:
        model = UMT5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model, config, t5x_checkpoint_path, is_encoder_only, scalable_attention)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)

    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print("Done")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
    # Required parameters
    parser.add_argument(
        "--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
    )
    parser.add_argument(
        "--scalable_attention",
        action="store_true",
        help="Whether the model uses scaled attention (umt5 model)",
        default=False,
    )
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
        args.config_file,
        args.pytorch_dump_path,
        args.is_encoder_only,
        args.scalable_attention,
    )
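# Example invocation (all paths are placeholders, and the script filename
# is assumed to be convert_t5x_checkpoint_to_pytorch.py):
#
#     python convert_t5x_checkpoint_to_pytorch.py \
#         --t5x_checkpoint_path /tmp/t5x_checkpoints/checkpoint_1000000 \
#         --config_file /tmp/umt5-small/config.json \
#         --pytorch_dump_path /tmp/umt5-small \
#         --scalable_attention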
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score  # noqa: E402 # isort:skip


logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def infer_model_type(model_name_or_path):
    if "token" in model_name_or_path:
        return "rag_token"
    if "sequence" in model_name_or_path:
        return "rag_sequence"
    if "bart" in model_name_or_path:
        return "bart"
    return None
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    return max(metric_fn(prediction, gt) for gt in ground_truths)
def get_scores(args, preds_path, gold_data_path):
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    answers = []

    if args.gold_data_mode == "qa":
        data = pd.read_csv(gold_data_path, sep="\t", header=None)
        for answer_list in data[1]:
            ground_truths = ast.literal_eval(answer_list)
            answers.append(ground_truths)
    else:
        references = [line.strip() for line in open(gold_data_path, "r").readlines()]
        answers = [[reference] for reference in references]

    f1 = em = total = 0
    for prediction, ground_truths in zip(hypos, answers):
        total += 1
        em += metric_max_over_ground_truths(exact_match_score, prediction, ground_truths)
        f1 += metric_max_over_ground_truths(f1_score, prediction, ground_truths)

    em = 100.0 * em / total
    f1 = 100.0 * f1 / total

    logger.info(f"F1: {f1:.2f}")
    logger.info(f"EM: {em:.2f}")
def get_precision_at_k(args, preds_path, gold_data_path):
    k = args.k
    hypos = [line.strip() for line in open(preds_path, "r").readlines()]
    references = [line.strip() for line in open(gold_data_path, "r").readlines()]

    em = total = 0
    for hypo, reference in zip(hypos, references):
        hypo_provenance = set(hypo.split("\t")[:k])
        ref_provenance = set(reference.split("\t"))
        total += 1
        em += len(hypo_provenance & ref_provenance) / k

    em = 100.0 * em / total
    logger.info(f"Precision@{k}: {em: .2f}")
def evaluate_batch_retrieval(args, rag_model, questions):
    def strip_title(title):
        if title.startswith('"'):
            title = title[1:]
        if title.endswith('"'):
            title = title[:-1]
        return title

    retriever_input_ids = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
        questions,
        return_tensors="pt",
        padding=True,
        truncation=True,
    )["input_ids"].to(args.device)

    question_enc_outputs = rag_model.rag.question_encoder(retriever_input_ids)
    question_enc_pool_output = question_enc_outputs[0]

    result = rag_model.retriever(
        retriever_input_ids,
        question_enc_pool_output.cpu().detach().to(torch.float32).numpy(),
        prefix=rag_model.rag.generator.config.prefix,
        n_docs=rag_model.config.n_docs,
        return_tensors="pt",
    )
    all_docs = rag_model.retriever.index.get_doc_dicts(result.doc_ids)
    provenance_strings = []
    for docs in all_docs:
        provenance = [strip_title(title) for title in docs["title"]]
        provenance_strings.append("\t".join(provenance))
    return provenance_strings
def evaluate_batch_e2e(args, rag_model, questions):
    with torch.no_grad():
        inputs_dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
            questions, return_tensors="pt", padding=True, truncation=True
        )

        input_ids = inputs_dict.input_ids.to(args.device)
        attention_mask = inputs_dict.attention_mask.to(args.device)
        outputs = rag_model.generate(  # rag_model overwrites generate
            input_ids,
            attention_mask=attention_mask,
            num_beams=args.num_beams,
            min_length=args.min_length,
            max_length=args.max_length,
            early_stopping=False,
            num_return_sequences=1,
            bad_words_ids=[[0, 0]],
        )
        answers = rag_model.retriever.generator_tokenizer.batch_decode(outputs, skip_special_tokens=True)

        if args.print_predictions:
            for q, a in zip(questions, answers):
                logger.info("Q: {} - A: {}".format(q, a))

        return answers
def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_type",
        choices=["rag_sequence", "rag_token", "bart"],
        type=str,
        help=(
            "RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
            " model_name_or_path"
        ),
    )
    parser.add_argument(
        "--index_name",
        default=None,
        choices=["exact", "compressed", "legacy"],
        type=str,
        help="RAG model retriever type",
    )
    parser.add_argument(
        "--index_path",
        default=None,
        type=str,
        help="Path to the retrieval index",
    )
    parser.add_argument("--n_docs", default=5, type=int, help="Number of retrieved docs")
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained checkpoints or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--eval_mode",
        choices=["e2e", "retrieval"],
        default="e2e",
        type=str,
        help=(
            "Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
            " precision@k."
        ),
    )
    parser.add_argument("--k", default=1, type=int, help="k for the precision@k calculation")
    parser.add_argument(
        "--evaluation_set",
        default=None,
        type=str,
        required=True,
        help="Path to a file containing evaluation samples",
    )
    parser.add_argument(
        "--gold_data_path",
        default=None,
        type=str,
        required=True,
        help="Path to a tab-separated file with gold samples",
    )
    parser.add_argument(
        "--gold_data_mode",
        default="qa",
        type=str,
        choices=["qa", "ans"],
        help=(
            "Format of the gold data file"
            "qa - a single line in the following format: question [tab] answer_list"
            "ans - a single line of the gold file contains the expected answer string"
        ),
    )
    parser.add_argument(
        "--predictions_path",
        type=str,
        default="predictions.txt",
        help="Name of the predictions file, to be stored in the checkpoints directory",
    )
    parser.add_argument(
        "--eval_all_checkpoints",
        action="store_true",
        help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number",
    )
    parser.add_argument(
        "--eval_batch_size",
        default=8,
        type=int,
        help="Batch size per GPU/CPU for evaluation.",
    )
    parser.add_argument(
        "--recalculate",
        help="Recalculate predictions even if the prediction file exists",
        action="store_true",
    )
    parser.add_argument(
        "--num_beams",
        default=4,
        type=int,
        help="Number of beams to be used when generating answers",
    )
    parser.add_argument("--min_length", default=1, type=int, help="Min length of the generated answers")
    parser.add_argument("--max_length", default=50, type=int, help="Max length of the generated answers")
    parser.add_argument(
        "--print_predictions",
        action="store_true",
        help="If True, prints predictions while evaluating.",
    )
    parser.add_argument(
        "--print_docs",
        action="store_true",
        help="If True, prints docs retrieved while generating.",
    )
    args = parser.parse_args()
    args.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    return args
def main(args):
    model_kwargs = {}
    if args.model_type is None:
        args.model_type = infer_model_type(args.model_name_or_path)
        assert args.model_type is not None
    if args.model_type.startswith('rag'):
        model_class = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
        model_kwargs['n_docs'] = args.n_docs
        if args.index_name is not None:
            model_kwargs['index_name'] = args.index_name
        if args.index_path is not None:
            model_kwargs['index_path'] = args.index_path
    else:
        model_class = BartForConditionalGeneration
    checkpoints = (
        [f.path for f in os.scandir(args.model_name_or_path) if f.is_dir()]
        if args.eval_all_checkpoints
        else [args.model_name_or_path]
    )
    logger.info('Evaluate the following checkpoints: %s', checkpoints)
    score_fn = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
    evaluate_batch_fn = evaluate_batch_e2e if args.eval_mode == 'e2e' else evaluate_batch_retrieval
    for checkpoint in checkpoints:
        if os.path.exists(args.predictions_path) and (not args.recalculate):
            logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path))
            score_fn(args, args.predictions_path, args.gold_data_path)
            continue
        logger.info('***** Running evaluation for {} *****'.format(checkpoint))
        logger.info('  Batch size = %d', args.eval_batch_size)
        logger.info('  Predictions will be stored under {}'.format(args.predictions_path))
        if args.model_type.startswith('rag'):
            retriever = RagRetriever.from_pretrained(checkpoint, **model_kwargs)
            model = model_class.from_pretrained(checkpoint, retriever=retriever, **model_kwargs)
            model.retriever.init_retrieval()
        else:
            model = model_class.from_pretrained(checkpoint, **model_kwargs)
        model.to(args.device)
        with open(args.evaluation_set, 'r') as eval_file, open(args.predictions_path, 'w') as preds_file:
            questions = []
            for line in tqdm(eval_file):
                questions.append(line.strip())
                if len(questions) == args.eval_batch_size:
                    answers = evaluate_batch_fn(args, model, questions)
                    preds_file.write('\n'.join(answers) + '\n')
                    preds_file.flush()
                    questions = []
            if len(questions) > 0:
                answers = evaluate_batch_fn(args, model, questions)
                preds_file.write('\n'.join(answers))
                preds_file.flush()
            score_fn(args, args.predictions_path, args.gold_data_path)


if __name__ == "__main__":
    args = get_args()
    main(args)
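# --- Added usage sketch (not part of the original script) ---
# A minimal, hedged example of how the evaluation script above might be
# invoked. The script name, checkpoint directory and data file paths are
# hypothetical placeholders; only the flag names come from get_args() above.
#
#   python eval_rag.py \
#       --model_type rag_token \
#       --model_name_or_path ./checkpoints/rag-token-nq \
#       --eval_mode e2e \
#       --evaluation_set ./data/dev.questions \
#       --gold_data_path ./data/dev.gold \
#       --predictions_path ./preds/dev_predictions.txt \
#       --eval_batch_size 8 --num_beams 4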
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=128,
        max_relative_position=32,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NezhaModel,
            NezhaForMaskedLM,
            NezhaForMultipleChoice,
            NezhaForNextSentencePrediction,
            NezhaForPreTraining,
            NezhaForQuestionAnswering,
            NezhaForSequenceClassification,
            NezhaForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NezhaModel,
            "fill-mask": NezhaForMaskedLM,
            "question-answering": NezhaForQuestionAnswering,
            "text-classification": NezhaForSequenceClassification,
            "token-classification": NezhaForTokenClassification,
            "zero-shot": NezhaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
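# --- Added usage sketch (not part of the original tests) ---
# A minimal, hedged example of running the same public checkpoint the
# integration tests above use ("sijunhe/nezha-cn-base") for feature
# extraction. It assumes network access to download the weights.
#
# from transformers import NezhaModel
# import torch
#
# model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
# input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
# with torch.no_grad():
#     hidden_states = model(input_ids).last_hidden_state  # shape (1, 6, 768)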
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo, pytorch_dump_folder_path):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"])
    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta."):]
        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue
        state_dict[tensor_key] = tensor_value
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict)
    model.save_pretrained(pytorch_dump_folder_path)
    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint-repo''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
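# --- Added usage sketch (not part of the original script) ---
# A hedged example of invoking the conversion above and loading the result.
# The script filename and the output folder are hypothetical placeholders;
# the repo id matches the example given in the --checkpoint-repo help text.
#
#   python convert_roberta_prelayernorm_checkpoint.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta-prelayernorm-converted
#
# from transformers import RobertaPreLayerNormForMaskedLM
# model = RobertaPreLayerNormForMaskedLM.from_pretrained("./roberta-prelayernorm-converted")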
"""simple docstring"""
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_switch(self):
        # make sure that iterating over schedulers with the same config names gives the same results
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_timesteps(self):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True,
                            prediction_type=prediction_type,
                            sample_max_value=threshold,
                            algorithm_type="deis",
                            solver_order=order,
                            solver_type=solver_type,
                        )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        sample = self.full_loop(
                            solver_order=order,
                            solver_type=solver_type,
                            prediction_type=prediction_type,
                            algorithm_type=algorithm_type,
                        )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.23916) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_mean.item() - 0.091) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
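# --- Added usage sketch (not part of the original tests) ---
# A minimal, hedged sketch of the multistep sampling pattern the tests above
# exercise: call set_timesteps() once, then repeatedly call step() with the
# model's noise prediction. The "model" below is a random stand-in, purely to
# show the control flow, not a real denoiser.
#
# import torch
# from diffusers import DEISMultistepScheduler
#
# scheduler = DEISMultistepScheduler(num_train_timesteps=1000, solver_order=2)
# scheduler.set_timesteps(10)
# sample = torch.randn(1, 3, 8, 8)
# for t in scheduler.timesteps:
#     noise_pred = torch.randn_like(sample)  # placeholder for a UNet forward pass
#     sample = scheduler.step(noise_pred, t, sample).prev_sample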
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(vocab_tokens) + '\n')
        with open(self.merges_file, 'w', encoding='utf-8') as fp:
            fp.write('\n'.join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
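# --- Added illustration (not part of the original test file) ---
# A hedged walk-through of the expected BPE segmentation above. For "react"
# (characters r e a c t</w>) only the merge "r e" applies, leaving the pieces
# re / a / c / t, which surface as "re@@ a@@ c@@ t". For "readapt" the merges
# "a p", "ap t</w>", "r e", "a d" and "ad apt</w>" fire in priority order,
# collapsing it to re / adapt, i.e. "re@@ adapt", which maps to ids [1, 0]
# via the toy vocab (adapt=0, re@@=1, a@@=2, apt=3, c@@=4, t=5, <unk>=6).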
'''simple docstring'''
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count)
    return current_sum, solutions_count


def solve(needed_sum, power):
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            'Invalid input\n'
            'needed_sum must be between 1 and 1000, power between 2 and 10.')
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
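# --- Added worked example (not part of the original module) ---
# A hedged check of a small case: 13 written as a sum of distinct
# natural-number powers with power=2 has exactly one representation,
# 2**2 + 3**2 = 4 + 9 = 13, so solve(13, 2) should return 1.
# (Including 1**2 cannot work: 1 + 4 = 5, 1 + 9 = 10, 1 + 4 + 9 = 14.)
#
#     assert solve(13, 2) == 1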
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50_257,
        num_wordpiece_labels=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
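# --- Added usage sketch (not part of the original module) ---
# A minimal, hedged example of instantiating the configuration above with its
# defaults and overriding one field; attribute names follow __init__ above.
#
# config = MgpstrConfig(max_token_length=32)
# assert config.hidden_size == 768 and config.max_token_length == 32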
'''simple docstring'''
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
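# --- Added usage sketch (not part of the original tests) ---
# A hedged example of using the tool outside the test harness; it assumes the
# default text-classification checkpoint can be downloaded.
#
# from transformers import load_tool
#
# classifier = load_tool("text-classification")
# classifier.setup()
# print(classifier("That's quite cool", ["positive", "negative"]))  # expected: "positive"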
"""simple docstring"""
def solution(max_perimeter = 10**9):
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum
if __name__ == "__main__":
print(F"""{solution() = }""")
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    def __init__(self, components: Collection[float] | None = None) -> None:
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        return len(self.__components)

    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        return Vector(self.__components)

    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)


def zero_vector(dimension):
    """simple docstring"""
    assert isinstance(dimension, int)
    return Vector([0] * dimension)


def unit_basis_vector(dimension, pos):
    """simple docstring"""
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)


def axpy(scalar, x, y):
    """simple docstring"""
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y


def random_vector(n, a, b):
    """simple docstring"""
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None

    def height(self) -> int:
        return self.__height

    def width(self) -> int:
        return self.__width

    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)


def square_zero_matrix(n):
    """simple docstring"""
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)


def random_matrix(width, height, a, b):
    """simple docstring"""
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
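# --- Added usage example (not part of the original library) ---
# A minimal, hedged demo of the classes above: vector arithmetic, the dot
# product via __mul__, and a 2x2 determinant.
if __name__ == "__main__":
    v = Vector([1.0, 2.0, 3.0])
    w = Vector([4.0, 5.0, 6.0])
    print(v + w)            # (5.0,7.0,9.0)
    print(v * w)            # dot product: 32.0
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    print(m.determinant())  # -2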
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost


class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f'''AStar execution time = {end_time:f} seconds''')

    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_end_time = time.time() - bd_start_time
    print(f'''BidirectionalAStar execution time = {bd_end_time:f} seconds''')
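# --- Added note (not part of the original module) ---
# The module-level HEURISTIC flag above selects the distance function used by
# Node.calculate_heuristic: 1 gives Manhattan distance (admissible on a
# 4-connected grid), anything else gives Euclidean distance. A hedged sketch
# of switching it, assuming this file is importable under a hypothetical name:
#
#     import bidirectional_a_star as bas  # hypothetical module name
#     bas.HEURISTIC = 1                   # use Manhattan distance
#     print(bas.AStar((0, 0), (6, 6)).search())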
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        'encoder.version',
        'decoder.version',
        'model.encoder.version',
        'model.decoder.version',
        '_float_tensor',
        'decoder.output_projection.weight',
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_mbart_checkpoint_from_disk(
    checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_50=False
):
    state_dict = torch.load(checkpoint_path, map_location='cpu')['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_50 and finetuned:
        mbart_config.activation_function = 'relu'
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
lowercase_ = parser.parse_args()
lowercase_ = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
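# --- Added usage sketch (not part of the original script) ---
# A hedged example of invoking the conversion script above; the script name
# and the fairseq checkpoint path are hypothetical placeholders. The two
# positional arguments are the fairseq model path and the output folder.
#
#   python convert_mbart_checkpoint.py \
#       ./mbart.cc25/model.pt ./mbart-converted \
#       --hf_config facebook/mbart-large-cc25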
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
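# A minimal usage sketch (added for illustration). `create_student_by_copying_alternating_layers`
# shrinks a teacher seq2seq model by copying a subset of its layers; `e`/`d` are the number of
# encoder/decoder layers to keep (None keeps the teacher's count). The save path is a placeholder.
#
#   student, *_ = create_student_by_copying_alternating_layers(
#       "sshleifer/bart-tiny-random", "/tmp/student", e=1, d=1
#   )
#   assert student.config.encoder_layers == 1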
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
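# A minimal usage sketch (added for illustration; the image values are synthetic).
# It shows the preprocessing contract the tests above verify: any PIL image in,
# a (1, num_channels, height, width) float tensor out.
#
#   from PIL import Image
#   import numpy as np
#   from transformers import ViTImageProcessor
#
#   processor = ViTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
#   image = Image.fromarray(np.random.randint(0, 255, (64, 48, 3), dtype=np.uint8))
#   pixel_values = processor(image, return_tensors="pt").pixel_values  # shape (1, 3, 18, 18)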
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase_ = "src/diffusers"
UpperCamelCase_ = "."
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase_ = importlib.util.spec_from_file_location(
"diffusers",
os.path.join(DIFFUSERS_PATH, "__init__.py"),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase_ = spec.loader.load_module()
def _should_continue(line, indent):
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None


def find_code_in_diffusers(object_name):
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0

    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")

    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1

    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")

    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1

    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
UpperCamelCase_ = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
UpperCamelCase_ = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
UpperCamelCase_ = re.compile(r"<FILL\s+[^>]*>")
def A ( __UpperCAmelCase ) -> str:
'''simple docstring'''
UpperCAmelCase_ = code.split('''\n''' )
UpperCAmelCase_ = 0
while idx < len(__UpperCAmelCase ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(__UpperCAmelCase ):
return re.search(r'''^(\s*)\S''' , lines[idx] ).groups()[0]
return ""
def A ( __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = len(get_indent(__UpperCAmelCase ) ) > 0
if has_indent:
UpperCAmelCase_ = f"class Bla:\n{code}"
UpperCAmelCase_ = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=__UpperCAmelCase )
UpperCAmelCase_ = black.format_str(__UpperCAmelCase , mode=__UpperCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = style_docstrings_in_code(__UpperCAmelCase )
return result[len('''class Bla:\n''' ) :] if has_indent else result
def is_copy_consistent(filename, overwrite=False):
    """
    Check if the code commented as a copy in `filename` matches the original.
    Return the differences or overwrite the content depending on `overwrite`.
    """
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue

        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)

        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index

        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1

        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)

        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)

        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)

            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]

        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1

    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False):
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_copies(args.fix_and_overwrite)
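# A minimal illustration (added for context) of the comment convention this script enforces.
# A function in diffusers can declare itself a copy of another object, optionally with renames:
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock.forward with BasicTransformerBlock->MyBlock
#   def forward(self, hidden_states):
#       ...
#
# `check_copies()` re-derives the "theoretical" body from the source object, applies the
# `old->new` renames (plus lowercase/UPPERCASE variants when `all-casing` is given), and
# fails or rewrites the file when the observed body has drifted. The object path above is
# a hypothetical example, not necessarily a real diffusers symbol.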
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier version of set_timesteps() caused an error indexing alpha's with inference steps as power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
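# A minimal denoising-loop sketch (added for illustration), mirroring full_loop() above
# but with a standalone PNDMScheduler. `unet` stands in for any noise-prediction model
# and is a hypothetical placeholder.
#
#   scheduler = PNDMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#   scheduler.set_timesteps(50)
#   for t in scheduler.timesteps:
#       noise_pred = unet(sample, t)          # model predicts the residual noise
#       sample = scheduler.step(noise_pred, t, sample).prev_sample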
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either of the conditions are true, the function is being asked
    # to calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'''The number of five-card hands possible from a standard''',
f"fifty-two card deck is: {combinations(52, 5)}\n",
)
print(
'''If a class of 40 students must be arranged into groups of''',
f"4 for group projects, there are {combinations(40, 4)} ways",
'''to arrange them.\n''',
)
print(
'''If 10 teams are competing in a Formula One race, there''',
f"are {combinations(10, 3)} ways that first, second and",
'''third place can be awarded.''',
)
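# Worked check (added for illustration): C(n, k) = n! / (k! * (n - k)!), so
# combinations(5, 2) = 120 / (2 * 6) = 10.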
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch):
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
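# A small worked example (added for illustration) of the answer-matching helpers above:
#
#   normalize_answer("The Eiffel Tower!")                      -> "eiffel tower"
#   exact_match_score("The Eiffel Tower!", "eiffel tower")     -> True
#   f1_score("the tall eiffel tower", "eiffel tower")          -> 0.8
#     (3 prediction tokens after normalization, 2 overlap: precision 2/3, recall 1, F1 = 0.8)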
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
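# A minimal usage sketch (added for illustration). With files laid out as
# data_dir/<label>/<clip>.wav, the builder above powers:
#
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="/path/to/clips")  # path is a placeholder
#   ds["train"][0]["audio"]   # {"path": ..., "array": ..., "sampling_rate": ...}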
def is_isogram(string: str) -> bool:
    """An isogram is a word in which no letter is repeated."""
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))


if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
    print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
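# Quick examples (added for illustration): is_isogram("Uncopyrightable") -> True,
# while is_isogram("letter") -> False because "t" and "e" repeat.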
import numpy as np
def power_iteration(
    input_matrix: np.ndarray,
    vector: np.ndarray,
    error_tol: float = 1e-12,
    max_iterations: int = 100,
):
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
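# A tiny worked example (added for illustration). For the diagonal matrix
# [[2, 0], [0, 1]] the dominant eigenvalue is 2 with eigenvector [1, 0]:
#
#   eigen_value, eigen_vector = power_iteration(
#       np.array([[2.0, 0.0], [0.0, 1.0]]), np.array([1.0, 1.0])
#   )
#   # eigen_value -> ~2.0, eigen_vector -> ~[1.0, 0.0]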
def solution(limit: int = 1000000) -> int:
    """
    Sum Euler's totient phi(n) for 2 <= n <= limit, using a sieve:
    phi[n] starts at n - 1, and each prime p removes the multiples-of-p
    fraction from the phi of its multiples.
    """
    phi = [i - 1 for i in range(limit + 1)]

    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
print(solution())
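# Worked check (added for illustration): for limit = 8 the totients are
# phi(2..8) = 1, 2, 2, 4, 2, 6, 4, so solution(8) returns 21.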
deps = {
'Pillow': 'Pillow<10.0.0',
'accelerate': 'accelerate>=0.20.3',
'av': 'av==9.2.0',
'beautifulsoup4': 'beautifulsoup4',
'black': 'black~=23.1',
'codecarbon': 'codecarbon==1.2.0',
'cookiecutter': 'cookiecutter==1.7.3',
'dataclasses': 'dataclasses',
'datasets': 'datasets!=2.5.0',
'decord': 'decord==0.6.0',
'deepspeed': 'deepspeed>=0.9.3',
'diffusers': 'diffusers',
'dill': 'dill<0.3.5',
'evaluate': 'evaluate>=0.2.0',
'fairscale': 'fairscale>0.3',
'faiss-cpu': 'faiss-cpu',
'fastapi': 'fastapi',
'filelock': 'filelock',
'flax': 'flax>=0.4.1,<=0.7.0',
'ftfy': 'ftfy',
'fugashi': 'fugashi>=1.0',
'GitPython': 'GitPython<3.1.19',
'hf-doc-builder': 'hf-doc-builder>=0.3.0',
'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0',
'importlib_metadata': 'importlib_metadata',
'ipadic': 'ipadic>=1.0.0,<2.0',
'isort': 'isort>=5.5.4',
'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13',
'jaxlib': 'jaxlib>=0.1.65,<=0.4.13',
'jieba': 'jieba',
'kenlm': 'kenlm',
'keras-nlp': 'keras-nlp>=0.3.1',
'librosa': 'librosa',
'nltk': 'nltk',
'natten': 'natten>=0.14.6',
'numpy': 'numpy>=1.17',
'onnxconverter-common': 'onnxconverter-common',
'onnxruntime-tools': 'onnxruntime-tools>=1.4.2',
'onnxruntime': 'onnxruntime>=1.4.0',
'opencv-python': 'opencv-python',
'optuna': 'optuna',
'optax': 'optax>=0.0.8,<=0.1.4',
'packaging': 'packaging>=20.0',
'parameterized': 'parameterized',
'phonemizer': 'phonemizer',
'protobuf': 'protobuf',
'psutil': 'psutil',
'pyyaml': 'pyyaml>=5.1',
'pydantic': 'pydantic<2',
'pytest': 'pytest>=7.2.0',
'pytest-timeout': 'pytest-timeout',
'pytest-xdist': 'pytest-xdist',
'python': 'python>=3.8.0',
'ray[tune]': 'ray[tune]',
'regex': 'regex!=2019.12.17',
'requests': 'requests',
'rhoknp': 'rhoknp>=1.1.0,<1.3.1',
'rjieba': 'rjieba',
'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1',
'ruff': 'ruff>=0.0.241,<=0.0.259',
'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0',
'sacremoses': 'sacremoses',
'safetensors': 'safetensors>=0.3.1',
'sagemaker': 'sagemaker>=2.31.0',
'scikit-learn': 'scikit-learn',
'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92',
'sigopt': 'sigopt',
'starlette': 'starlette',
'sudachipy': 'sudachipy>=0.6.6',
'sudachidict_core': 'sudachidict_core>=20220729',
'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14',
'tensorflow': 'tensorflow>=2.6,<2.14',
'tensorflow-text': 'tensorflow-text<2.14',
'tf2onnx': 'tf2onnx',
'timeout-decorator': 'timeout-decorator',
'timm': 'timm',
'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14',
'torch': 'torch>=1.9,!=1.12.0',
'torchaudio': 'torchaudio',
'torchvision': 'torchvision',
'pyctcdecode': 'pyctcdecode>=0.4.0',
'tqdm': 'tqdm>=4.27',
'unidic': 'unidic>=1.0.2',
'unidic_lite': 'unidic_lite>=1.0.7',
'urllib3': 'urllib3<2.0.0',
'uvicorn': 'uvicorn',
}
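# A minimal usage sketch (added for illustration): this table maps a bare package
# name to its pinned requirement string, e.g. deps["torch"] == "torch>=1.9,!=1.12.0",
# so setup code can do: install_requires = [deps[pkg] for pkg in ("numpy", "torch")].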
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally."
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    "Decorator marking a test as slow; run it by setting the RUN_SLOW env var to a truthy value."
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    "Decorator marking a test that must run on CPU only; skipped when a GPU is available."
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    "Skip unless the installed torch version is at least `version`; usable with or without arguments."
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    "Keeps a single temporary directory open for the class and wipes its contents on setUp."

    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result
class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    """
    Runs `command` with `subprocess.check_output` and will potentially return the `stdout`. Will also properly
    capture if an error occurred while running `command`.
    """
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
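# A minimal usage sketch (added for illustration) of the helpers above in a test module;
# the script path is a hypothetical placeholder:
#
#   @require_cuda
#   class MyGpuTests(AccelerateTestCase):
#       @slow
#       def test_training_step(self):
#           run_command(["python", "examples/train.py"])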
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct( readme_md , expected_dict ):
    """simple docstring"""
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors( readme_md , expected_error ):
    """simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='''root''' ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors( readme_md , expected_error ):
    """simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='''root''' ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors( readme_md ):
    """simple docstring"""
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
'''readme_md, expected_dict''' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct( readme_md , expected_dict ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors( readme_md , expected_error ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
'''readme_md, expected_error''' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors( readme_md , expected_error ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
'''readme_md,''' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors( readme_md ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / '''README.md'''
        with open(path , '''w+''' ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True )
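# Illustrative usage of the fixtures above (a sketch; `example_yaml_structure` is the
# expected-structure dict assumed to be defined earlier in this module):
#
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()  # raises ValueError listing any structural issues found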
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["torch", "torchsde"]
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        requires_backends(self , ["torch", "torchsde"])
    @classmethod
    def from_config( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "torchsde"])
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        """simple docstring"""
        requires_backends(cls , ["torch", "torchsde"])
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester :
"""simple docstring"""
def __init__( self : List[str] , snake_case_ : Optional[int] , snake_case_ : Tuple=13 , snake_case_ : Optional[int]=[30, 30] , snake_case_ : List[Any]=2 , snake_case_ : Any=3 , snake_case_ : Optional[Any]=True , snake_case_ : Dict=True , snake_case_ : Optional[int]=32 , snake_case_ : List[str]=5 , snake_case_ : Tuple=4 , snake_case_ : List[Any]=37 , snake_case_ : List[str]="gelu" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : int=10 , snake_case_ : str=0.02 , snake_case_ : Optional[int]=3 , snake_case_ : Dict=None , snake_case_ : Dict=8 , snake_case_ : Optional[Any]=10 , ):
snake_case__ : List[str] = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Any = image_size
snake_case__ : int = patch_size
snake_case__ : Optional[int] = num_channels
snake_case__ : Optional[Any] = is_training
snake_case__ : Any = use_labels
snake_case__ : Any = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Optional[int] = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Dict = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : Union[str, Any] = scope
snake_case__ : List[Any] = n_targets
snake_case__ : str = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
snake_case__ : int = (image_size[1] // patch_size) * (image_size[0] // patch_size)
snake_case__ : int = num_patches + 1 + self.num_detection_tokens
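        # e.g. with the defaults above (image_size=[30, 30], patch_size=2, num_detection_tokens=10):
        # num_patches = (30 // 2) * (30 // 2) = 225, so expected_seq_len = 225 + 1 + 10 = 236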
    def prepare_config_and_inputs( self ):
snake_case__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
snake_case__ : Union[str, Any] = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
snake_case__ : Tuple = []
for i in range(self.batch_size ):
snake_case__ : Any = {}
snake_case__ : Union[str, Any] = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=snake_case_ )
snake_case__ : str = torch.rand(self.n_targets , 4 , device=snake_case_ )
labels.append(snake_case_ )
snake_case__ : Dict = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
    def create_and_check_model( self , config , pixel_values , labels ):
        model = YolosModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
    def create_and_check_for_object_detection( self , config , pixel_values , labels ):
        model = YolosForObjectDetection(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values=pixel_values )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
        result = model(pixel_values=pixel_values , labels=labels )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
        self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class YolosModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {}
    )
    is_training = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
def lowerCamelCase ( self : int , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : List[str]=False ):
snake_case__ : Optional[Any] = super()._prepare_for_class(snake_case_ , snake_case_ , return_labels=snake_case_ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
snake_case__ : Dict = []
for i in range(self.model_tester.batch_size ):
snake_case__ : Union[str, Any] = {}
snake_case__ : Dict = torch.ones(
size=(self.model_tester.n_targets,) , device=snake_case_ , dtype=torch.long )
snake_case__ : Tuple = torch.ones(
self.model_tester.n_targets , 4 , device=snake_case_ , dtype=torch.float )
labels.append(snake_case_ )
snake_case__ : Optional[int] = labels
return inputs_dict
    def setUp( self ):
        self.model_tester = YolosModelTester(self )
        self.config_tester = ConfigTester(self , config_class=YolosConfig , has_text_modality=False , hidden_size=37 )
def lowerCamelCase ( self : Any ):
self.config_tester.run_common_tests()
def lowerCamelCase ( self : str ):
# YOLOS does not use inputs_embeds
pass
def lowerCamelCase ( self : Union[str, Any] ):
snake_case__ , snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def lowerCamelCase ( self : List[Any] ):
snake_case__ , snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(snake_case_ )
snake_case__ : Dict = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Dict = [*signature.parameters.keys()]
snake_case__ : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCamelCase ( self : List[Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCamelCase ( self : Any ):
snake_case__ , snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Dict = True
# in YOLOS, the seq_len is different
snake_case__ : List[Any] = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
snake_case__ : Dict = True
snake_case__ : str = False
snake_case__ : Dict = True
snake_case__ : Optional[Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
snake_case__ : Tuple = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : int = outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : Optional[Any] = True
snake_case__ : Tuple = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
snake_case__ : int = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : Optional[int] = outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
snake_case__ : int = len(snake_case_ )
# Check attention is always last and order is fine
snake_case__ : Tuple = True
snake_case__ : Any = True
snake_case__ : Optional[Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
snake_case__ : Optional[int] = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : Optional[Any] = 1
self.assertEqual(out_len + added_hidden_states , len(snake_case_ ) )
snake_case__ : Any = outputs.attentions
self.assertEqual(len(snake_case_ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def lowerCamelCase ( self : Union[str, Any] ):
def check_hidden_states_output(snake_case_ : int , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : str = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
snake_case__ : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
snake_case__ : Any = outputs.hidden_states
snake_case__ : Dict = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
# YOLOS has a different seq_length
snake_case__ : str = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
snake_case__ , snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[Any] = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Dict = True
check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase ( self : int ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*snake_case_ )
@slow
def lowerCamelCase ( self : Union[str, Any] ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[Any] = YolosModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def prepare_img() -> Image.Image:
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor( self ):
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
    @slow
    def test_inference_object_detection_head( self ):
        model = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(inputs.pixel_values )
        # verify outputs
        expected_shape = torch.Size((1, 100, 92) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice_logits = torch.tensor(
            [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=torch_device , )
        expected_slice_boxes = torch.tensor(
            [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , expected_slice_logits , atol=1E-4 ) )
        self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_slice_boxes , atol=1E-4 ) )
        # verify postprocessing
        results = image_processor.post_process_object_detection(
            outputs , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
        expected_scores = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(torch_device )
        expected_labels = [75, 75, 17, 63, 17]
        expected_slice_boxes = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(torch_device )
        self.assertEqual(len(results["""scores"""] ) , 5 )
        self.assertTrue(torch.allclose(results["""scores"""] , expected_scores , atol=1E-4 ) )
        self.assertSequenceEqual(results["""labels"""].tolist() , expected_labels )
        self.assertTrue(torch.allclose(results["""boxes"""][0, :] , expected_slice_boxes ) )
'''simple docstring'''
import numpy as np
def power_iteration( input_matrix , vector , error_tol = 1e-12 , max_iterations = 100 , ) -> tuple[float, np.ndarray]:
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiply matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
    return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1j * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value , eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values , eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
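# Illustrative check (not part of the original module): for the symmetric matrix
# [[2.0, 1.0], [1.0, 2.0]] the dominant eigenvalue is 3 with eigenvector [1, 1] / sqrt(2),
# so power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))[0]
# converges to ~3.0.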
'''simple docstring'''
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
UpperCamelCase__ : Union[str, Any] = 'src/diffusers'
UpperCamelCase__ : Any = '.'
# This is to make sure the diffusers module imported is the one in the repo.
UpperCamelCase__ : Tuple = importlib.util.spec_from_file_location(
'diffusers',
os.path.join(DIFFUSERS_PATH, '__init__.py'),
submodule_search_locations=[DIFFUSERS_PATH],
)
UpperCamelCase__ : str = spec.loader.load_module()
def UpperCAmelCase ( a_ , a_ ) -> Optional[Any]:
"""simple docstring"""
return line.startswith(a_ ) or len(a_ ) <= 1 or re.search(R"""^\s*\)(\s*->.*:|:)\s*$""" , a_ ) is not None
def UpperCAmelCase ( a_ ) -> Optional[Any]:
"""simple docstring"""
A_ : int = object_name.split(""".""" )
A_ : Optional[int] = 0
# First let's find the module where our object lives.
A_ : int = parts[i]
while i < len(a_ ) and not os.path.isfile(os.path.join(a_ , F"{module}.py" ) ):
i += 1
if i < len(a_ ):
A_ : int = os.path.join(a_ , parts[i] )
if i >= len(a_ ):
raise ValueError(F"`object_name` should begin with the name of a module of diffusers but got {object_name}." )
with open(os.path.join(a_ , F"{module}.py" ) , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A_ : List[str] = f.readlines()
# Now let's find the class / func in the code!
A_ : List[Any] = """"""
A_ : Tuple = 0
for name in parts[i + 1 :]:
while (
line_index < len(a_ ) and re.search(RF"^{indent}(class|def)\s+{name}(\(|\:)" , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(a_ ):
raise ValueError(F" {object_name} does not match any function or class in {module}." )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A_ : Any = line_index
while line_index < len(a_ ) and _should_continue(lines[line_index] , a_ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A_ : Any = lines[start_index:line_index]
return "".join(a_ )
UpperCamelCase__ : Any = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)')
UpperCamelCase__ : Optional[int] = re.compile(r'^\s*(\S+)->(\S+)(\s+.*|$)')
UpperCamelCase__ : int = re.compile(r'<FILL\s+[^>]*>')
def get_indent( code ) -> str:
    """simple docstring"""
    lines = code.split("""\n""" )
    idx = 0
    while idx < len(lines ) and len(lines[idx] ) == 0:
        idx += 1
    if idx < len(lines ):
        return re.search(R"""^(\s*)\S""" , lines[idx] ).groups()[0]
    return ""
def blackify( code ) -> str:
    """simple docstring"""
    has_indent = len(get_indent(code ) ) > 0
    if has_indent:
        code = F"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_1_9 , preview=True )
    result = black.format_str(code , mode=mode )
    result , _ = style_docstrings_in_code(result )
    return result[len("""class Bla:\n""" ) :] if has_indent else result
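# Illustrative behaviour (not from the original file): get_indent("    x = 1\n    y = 2")
# returns "    ", and blackify keeps an indented fragment valid for black by temporarily
# wrapping it in a dummy `class Bla:` header before formatting.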
def UpperCAmelCase ( a_ , a_=False ) -> Dict:
"""simple docstring"""
with open(a_ , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
A_ : int = f.readlines()
A_ : Union[str, Any] = []
A_ : Tuple = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(a_ ):
A_ : Optional[int] = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A_ , A_ , A_ : Any = search.groups()
A_ : Any = find_code_in_diffusers(a_ )
A_ : Optional[int] = get_indent(a_ )
A_ : List[str] = line_index + 1 if indent == theoretical_indent else line_index + 2
A_ : List[Any] = theoretical_indent
A_ : Optional[int] = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A_ : str = True
while line_index < len(a_ ) and should_continue:
line_index += 1
if line_index >= len(a_ ):
break
A_ : str = lines[line_index]
A_ : Tuple = _should_continue(a_ , a_ ) and re.search(F"^{indent}# End copy" , a_ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A_ : Optional[int] = lines[start_index:line_index]
A_ : Any = """""".join(a_ )
# Remove any nested `Copied from` comments to avoid circular copies
A_ : str = [line for line in theoretical_code.split("""\n""" ) if _re_copy_warning.search(a_ ) is None]
A_ : Optional[Any] = """\n""".join(a_ )
# Before comparing, use the `replace_pattern` on the original code.
if len(a_ ) > 0:
A_ : Union[str, Any] = replace_pattern.replace("""with""" , """""" ).split(""",""" )
A_ : List[Any] = [_re_replace_pattern.search(a_ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
                obj1 , obj2 , option = pattern.groups()
                theoretical_code = re.sub(obj1 , obj2 , theoretical_code )
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower() , obj2.lower() , theoretical_code )
                    theoretical_code = re.sub(obj1.upper() , obj2.upper() , theoretical_code )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A_ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code )
A_ : Optional[int] = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A_ : Optional[int] = lines[:start_index] + [theoretical_code] + lines[line_index:]
A_ : Optional[int] = start_index + 1
if overwrite and len(a_ ) > 0:
# Warn the user a file has been modified.
print(F"Detected changes, rewriting {filename}." )
with open(a_ , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(a_ )
return diffs
def UpperCAmelCase ( a_ = False ) -> Union[str, Any]:
"""simple docstring"""
A_ : List[Any] = glob.glob(os.path.join(a_ , """**/*.py""" ) , recursive=a_ )
A_ : str = []
for filename in all_files:
A_ : List[Any] = is_copy_consistent(a_ , a_ )
diffs += [F"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
if not overwrite and len(a_ ) > 0:
A_ : Optional[int] = """\n""".join(a_ )
raise Exception(
"""Found the following copy inconsistencies:\n"""
+ diff
+ """\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.""" )
if __name__ == "__main__":
UpperCamelCase__ : str = argparse.ArgumentParser()
parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
UpperCamelCase__ : List[Any] = parser.parse_args()
check_copies(args.fix_and_overwrite)
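# Typical invocations (from the repository root, per the note at the top of this script):
#   python utils/check_copies.py                      # report copy inconsistencies
#   python utils/check_copies.py --fix_and_overwrite  # rewrite offending files in place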
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase__ : Any = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = '''distilbert'''
    attribute_map = {
        '''hidden_size''': '''dim''',
        '''num_attention_heads''': '''n_heads''',
        '''num_hidden_layers''': '''n_layers''',
    }
    def __init__( self , vocab_size=3_0522 , max_position_embeddings=512 , sinusoidal_pos_embds=False , n_layers=6 , n_heads=12 , dim=768 , hidden_dim=4 * 768 , dropout=0.1 , attention_dropout=0.1 , activation="gelu" , initializer_range=0.02 , qa_dropout=0.1 , seq_classif_dropout=0.2 , pad_token_id=0 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs , pad_token_id=pad_token_id )
class DistilBertOnnxConfig( OnnxConfig ):
    """simple docstring"""
@property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            dynamic_axis = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config( model_name ):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768
    # set label information
    num_labels = 150
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""" ) , """r""" ) )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = SwinConfig(
        embed_dim=embed_dim , depths=depths , num_heads=num_heads , window_size=window_size , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , )
    config = UperNetConfig(
        backbone_config=backbone_config , auxiliary_in_channels=auxiliary_in_channels , num_labels=num_labels , id2label=id2label , label2id=label2id , )
    return config
def create_rename_keys( config ):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.patch_embed.projection.weight""", """backbone.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.patch_embed.projection.bias""", """backbone.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.patch_embed.norm.weight""", """backbone.embeddings.norm.weight""") )
rename_keys.append(("""backbone.patch_embed.norm.bias""", """backbone.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.norm2.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias""", f"""backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.stages.{i}.downsample.reduction.weight""", f"""backbone.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.weight""", f"""backbone.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.stages.{i}.downsample.norm.bias""", f"""backbone.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""backbone.hidden_states_norms.stage{i+1}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""backbone.hidden_states_norms.stage{i+1}.bias""") )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def read_in_q_k_v( state_dict , backbone_config ):
    num_features = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )]
    for i in range(len(backbone_config.depths ) ):
        dim = num_features[i]
        for j in range(backbone_config.depths[i] ):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight""" )
            in_proj_bias = state_dict.pop(f"""backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias""" )
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"""] = in_proj_weight[:dim, :]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"""] = in_proj_bias[: dim]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"""] = in_proj_weight[
                dim : dim * 2, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"""] = in_proj_bias[
                dim : dim * 2
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"""] = in_proj_weight[
                -dim :, :
            ]
            state_dict[f"""backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"""] = in_proj_bias[-dim :]
            # fmt: on
def correct_unfold_reduction_order( x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , 4 , in_channel // 4 )
    x = x[:, [0, 2, 1, 3], :].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def reverse_correct_unfold_reduction_order( x ):
    out_channel , in_channel = x.shape
    x = x.reshape(out_channel , in_channel // 4 , 4 )
    x = x[:, :, [0, 2, 1, 3]].transpose(1 , 2 ).reshape(out_channel , in_channel )
    return x
def correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(4 , in_channel // 4 )
    x = x[[0, 2, 1, 3], :].transpose(0 , 1 ).reshape(in_channel )
    return x
def reverse_correct_unfold_norm_order( x ):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4 , 4 )
    x = x[:, [0, 2, 1, 3]].transpose(0 , 1 ).reshape(in_channel )
    return x
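# Sanity sketch (illustrative, not in the original script): each correct_/reverse_ pair
# above is a mutual inverse, since the [0, 2, 1, 3] permutation swaps the middle two
# groups and so undoes itself, e.g.
#   t = torch.arange(8.0).reshape(2, 4)
#   assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(t)), t)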
def convert_upernet_checkpoint( model_name , pytorch_dump_folder_path , push_to_hub ):
    model_name_to_url = {
        """upernet-swin-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth""",
        """upernet-swin-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth""",
        """upernet-swin-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth""",
        """upernet-swin-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth""",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" , file_name=model_name )[
        """state_dict"""
    ]
    for name, param in state_dict.items():
        print(name , param.shape )
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace("""bn""" , """batch_norm""" )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config.backbone_config )
    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value )
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value )
    model.load_state_dict(state_dict )
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    processor = SegformerImageProcessor()
    pixel_values = processor(image , return_tensors="""pt""" ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
    logits = outputs.logits
    print(logits.shape )
    print("""First values of logits:""" , logits[0, 0, :3, :3] )
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]] )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]] )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]] )
    print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
    assert torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 )
    print("""Looks ok!""" )
    if pytorch_dump_folder_path is not None:
        print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
        model.save_pretrained(pytorch_dump_folder_path )
        print(f"""Saving processor to {pytorch_dump_folder_path}""" )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(f"""Pushing model and processor for {model_name} to hub""" )
        model.push_to_hub(f"""openmmlab/{model_name}""" )
        processor.push_to_hub(f"""openmmlab/{model_name}""" )
if __name__ == "__main__":
lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-swin-tiny',
type=str,
choices=[f"""upernet-swin-{size}""" for size in ['tiny', 'small', 'base', 'large']],
help='Name of the Swin + UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCamelCase : Optional[Any] = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
from math import pow
def backtrack( needed_sum , power , current_number , current_sum , solutions_count , ) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count
    i_to_n = int(pow(current_number , power ) )
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum , solutions_count = backtrack(
            needed_sum , power , current_number + 1 , current_sum , solutions_count )
    return current_sum, solutions_count
def solution( needed_sum , power ) -> int:
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            """Invalid input\n"""
            """needed_sum must be between 1 and 1000, power between 2 and 10.""" )
    return backtrack(needed_sum , power , 1 , 0 , 0 )[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
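# Illustrative values (worked by hand, not part of the original file): solution(13, 2) == 1,
# since 13 = 2**2 + 3**2 is the only way to write 13 as a sum of unique squares, and
# solution(100, 2) == 3 ({100}, {36, 64} and {1, 9, 16, 25, 49}).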
'''simple docstring'''
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-10_00 , 10_00 ) for i in range(10 )]
    r = randint(-50_00 , 50_00 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1( arr , target ) -> tuple[int, ...]:
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2( arr , target ) -> tuple[int, int, int]:
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left , right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=1_00_00 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=1_00_00 )
    return (min(times1 ), min(times2 ))
if __name__ == "__main__":
from doctest import testmod
testmod()
times = solution_times()
print(f'The time for naive implementation is {times[0]}.')
print(f'The time for optimized implementation is {times[1]}.')
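# Illustrative behaviour (not part of the original file): triplet_sum2([1, 2, 3, 4, 5], 9)
# returns (1, 3, 5); the sort + two-pointer scan runs in O(n^2) versus the O(n^3)
# permutation search in triplet_sum1.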
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
_UpperCamelCase = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
_UpperCamelCase = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
_UpperCamelCase = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))
def evaluate( item , main_target ):
    score = len([g for position, g in enumerate(item ) if g == main_target[position]] )
    return (item, float(score ))
def crossover( parent_a , parent_b ):
    random_slice = random.randint(0 , len(parent_a ) - 1 )
    child_a = parent_a[:random_slice] + parent_b[random_slice:]
    child_b = parent_b[:random_slice] + parent_a[random_slice:]
    return (child_a, child_b)
def mutate( child , genes ):
    child_list = list(child )
    if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
        # Swap one random position for a random gene.
        child_list[random.randint(0 , len(child ) ) - 1] = random.choice(genes )
    return "".join(child_list )
def select( parent_a , population_score , genes , ):
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_a[1] * 1_0_0 ) + 1
    child_n = 1_0 if child_n >= 1_0 else child_n
    for _ in range(child_n ):
        parent_b = population_score[random.randint(0 , N_SELECTED )][0]
        child_a , child_b = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , genes ) )
        pop.append(mutate(child_b , genes ) )
    return pop
def basic( target , genes , debug = True ):
# Verify if N_POPULATION is bigger than N_SELECTED
if N_POPULATION < N_SELECTED:
        msg = f"""{N_POPULATION} must be bigger than {N_SELECTED}"""
        raise ValueError(msg )
# Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes} )
    if not_in_genes_list:
        msg = f"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
        raise ValueError(msg )
# Generate random starting population.
    population = []
    for _ in range(N_POPULATION ):
        population.append("".join([random.choice(genes ) for i in range(len(target ) )] ) )
# Just some logs to know what the algorithms is doing.
    generation , total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item , target ) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
f"""\nGeneration: {generation}"""
f"""\nTotal Population:{total_population}"""
f"""\nBest score: {population_score[0][1]}"""
f"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3 )]
        population.clear()
        population.extend(population_best )
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target )) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , target , genes ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
            if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
"This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
)
    genes_list = list(
" ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
"nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
)
    generation , population , target = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
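# Illustrative effect (an assumption about the standard _LazyModule pattern): with this
# setup, `from transformers.models.pegasus_x import PegasusXConfig` succeeds even without
# torch installed, while `PegasusXModel` is only resolved, and torch imported, on first
# attribute access.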
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class WavLMConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """wavlm"""
    def __init__( self , vocab_size=3_2 , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout=0.1 , activation_dropout=0.1 , attention_dropout=0.1 , feat_proj_dropout=0.0 , final_dropout=0.1 , layerdrop=0.1 , initializer_range=0.02 , layer_norm_eps=1e-5 , feat_extract_norm="group" , feat_extract_activation="gelu" , conv_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , conv_stride=(5, 2, 2, 2, 2, 2, 2) , conv_kernel=(1_0, 3, 3, 3, 3, 2, 2) , conv_bias=False , num_conv_pos_embeddings=1_2_8 , num_conv_pos_embedding_groups=1_6 , num_buckets=3_2_0 , max_bucket_distance=8_0_0 , do_stable_layer_norm=False , apply_spec_augment=True , mask_time_prob=0.05 , mask_time_length=1_0 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=1_0 , num_codevectors_per_group=3_2_0 , num_codevector_groups=2 , contrastive_logits_temperature=0.1 , num_negatives=1_0_0 , codevector_dim=2_5_6 , proj_codevector_dim=2_5_6 , diversity_loss_weight=0.1 , ctc_loss_reduction="mean" , ctc_zero_infinity=False , use_weighted_layer_sum=False , classifier_proj_size=2_5_6 , tdnn_dim=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , tdnn_kernel=(5, 3, 3, 1, 1) , tdnn_dilation=(1, 2, 3, 1, 1) , xvector_output_dim=5_1_2 , num_ctc_classes=8_0 , pad_token_id=0 , bos_token_id=1 , eos_token_id=2 , add_adapter=False , adapter_kernel_size=3 , adapter_stride=2 , num_adapter_layers=3 , output_hidden_size=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_buckets = num_buckets
        self.max_bucket_distance = max_bucket_distance
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.num_ctc_classes = num_ctc_classes
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
                ' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
                f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
                f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1)
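# Illustrative check (an addition, not part of the original configuration code): the
# property above is just the product of the convolutional strides, so with the default
# strides the feature encoder maps 3_2_0 raw waveform samples to one frame
# (roughly 20 ms, i.e. ~50 frames per second, at 16 kHz input).
if __name__ == "__main__":
    import functools
    import operator

    default_strides = (5, 2, 2, 2, 2, 2, 2)
    assert functools.reduce(operator.mul , default_strides , 1) == 3_2_0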
| 48
| 1
|
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester :
    '''simple docstring'''
    def __init__( self , parent , batch_size=1_3 , image_size=3_2 , num_channels=3 , num_stages=4 , hidden_sizes=[1_0, 2_0, 3_0, 4_0] , depths=[2, 2, 3, 2] , is_training=True , use_labels=True , intermediate_size=3_7 , hidden_act="gelu" , type_sequence_label_size=1_0 , initializer_range=0.02 , out_features=["stage2", "stage3", "stage4"] , num_labels=3 , scope=None , ) -> Tuple:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_backbone_config( self ) -> Tuple:
        return ConvNextConfig(
            num_channels=self.num_channels , num_stages=self.num_stages , hidden_sizes=self.hidden_sizes , depths=self.depths , is_training=self.is_training , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , out_features=self.out_features , )
    def get_config( self ) -> Optional[Any]:
        return UperNetConfig(
            backbone_config=self.get_backbone_config() , hidden_size=5_1_2 , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_in_channels=4_0 , auxiliary_channels=2_5_6 , auxiliary_num_convs=1 , auxiliary_concat_input=False , loss_ignore_index=2_5_5 , num_labels=self.num_labels , )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels ) -> Any:
        model = UperNetForSemanticSegmentation(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
    def prepare_config_and_inputs_for_common( self ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {'''image-segmentation''': UperNetForSemanticSegmentation} if is_torch_available() else {}
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False
    def setUp( self ) -> List[str]:
        self.model_tester = UperNetModelTester(self )
        self.config_tester = ConfigTester(self , config_class=UperNetConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties( self ) -> List[Any]:
return
    def test_forward_signature( self ) -> Tuple:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_for_semantic_segmentation( self ) -> Union[str, Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
@unittest.skip(reason='UperNet does not use inputs_embeds' )
def __UpperCAmelCase ( self ) -> int:
pass
@unittest.skip(reason='UperNet does not support input and output embeddings' )
def __UpperCAmelCase ( self ) -> Any:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __UpperCAmelCase ( self ) -> List[Any]:
pass
@unittest.skip(reason='UperNet does not have a base model' )
def __UpperCAmelCase ( self ) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def __UpperCAmelCase ( self ) -> int:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def __UpperCAmelCase ( self ) -> str:
pass
    def test_hidden_states_output( self ) -> Optional[int]:
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) , expected_num_stages + 1 )
            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_initialization( self ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@unittest.skip(reason='UperNet does not have tied weights' )
def __UpperCAmelCase ( self ) -> Optional[int]:
pass
@slow
    def test_model_from_pretrained( self ) -> List[Any]:
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    filepath = hf_hub_download(
        repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' )
    image = Image.open(filepath ).convert('RGB' )
    return image
@require_torch
@require_vision
@slow
class lowerCamelCase (unittest.TestCase ):
'''simple docstring'''
    def test_inference_swin_backbone( self ) -> Tuple:
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-7.59_58, -7.59_58, -7.43_02], [-7.59_58, -7.59_58, -7.43_02], [-7.47_97, -7.47_97, -7.30_68]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )
    def test_inference_convnext_backbone( self ) -> List[Any]:
        processor = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' )
        model = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(torch_device )
        image = prepare_img()
        inputs = processor(images=image , return_tensors='pt' ).to(torch_device )
        with torch.no_grad():
            outputs = model(**inputs )
        expected_shape = torch.Size((1, model.config.num_labels, 5_1_2, 5_1_2) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] , expected_slice , atol=1E-4 ) )
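# Illustrative post-processing sketch (an addition, not part of the original tests): a
# dense segmentation map is the channel-wise argmax of the logits. It reuses the same
# checkpoint exercised by the slow test above, needs torch plus the vision extras, and
# runs only when this file is executed directly.
if __name__ == "__main__":
    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny" )
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny" )
    inputs = processor(images=prepare_img() , return_tensors="pt" )
    with torch.no_grad():
        logits = model(**inputs ).logits # (1, num_labels, 512, 512)
    segmentation_map = logits.argmax(dim=1 )[0] # (512, 512) integer label ids
    print(segmentation_map.shape )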
| 29
|
from __future__ import annotations
def get_valid_pos(position: tuple[int, int] , n: int ):
    '''simple docstring'''
    y , x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for position in positions:
        y_test , x_test = position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(position )
    return permissible_positions
def is_complete(board: list[list[int]] ):
    '''simple docstring'''
    return not any(elem == 0 for row in board for elem in row )
def open_knight_tour_helper(board: list[list[int]] , pos: tuple[int, int] , curr: int ):
    '''simple docstring'''
    if is_complete(board ):
        return True
    for position in get_valid_pos(pos , len(board ) ):
        y , x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board , position , curr + 1 ):
                return True
            board[y][x] = 0
    return False
def open_knight_tour(n: int ):
    '''simple docstring'''
    board = [[0 for i in range(n )] for j in range(n )]
    for i in range(n ):
        for j in range(n ):
            board[i][j] = 1
            if open_knight_tour_helper(board , (i, j) , 1 ):
                return board
            board[i][j] = 0
    msg = F"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg )
if __name__ == "__main__":
import doctest
doctest.testmod()
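# Illustrative usage (an addition): print one open tour on a 5 x 5 board; each cell
# holds the move number at which the knight visits it, starting from 1.
if __name__ == "__main__":
    for row in open_knight_tour(5 ):
        print(row )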
| 29
| 1
|
'''simple docstring'''
import os
from collections.abc import Iterator
def lowerCAmelCase_ ( snake_case_ : str = "." ) -> Iterator[str]:
'''simple docstring'''
for dir_path, dir_names, filenames in os.walk(snake_case_ ):
UpperCAmelCase_ = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(snake_case_ )[1] in (".py", ".ipynb"):
yield os.path.join(snake_case_ , snake_case_ ).lstrip("./" )
def lowerCAmelCase_ ( snake_case_ : Tuple ) -> List[Any]:
'''simple docstring'''
return f"""{i * " "}*""" if i else "\n##"
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : str ) -> str:
'''simple docstring'''
UpperCAmelCase_ = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(snake_case_ ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(snake_case_ )} {new_part.replace("_" , " " ).title()}""" )
return new_path
def lowerCAmelCase_ ( snake_case_ : str = "." ) -> None:
'''simple docstring'''
UpperCAmelCase_ = ""
for filepath in sorted(good_file_paths(snake_case_ ) ):
UpperCAmelCase_ , UpperCAmelCase_ = os.path.split(snake_case_ )
if filepath != old_path:
UpperCAmelCase_ = print_path(snake_case_ , snake_case_ )
UpperCAmelCase_ = (filepath.count(os.sep ) + 1) if filepath else 0
UpperCAmelCase_ = f"""{filepath}/{filename}""".replace(" " , "%20" )
UpperCAmelCase_ = os.path.splitext(filename.replace("_" , " " ).title() )[0]
print(f"""{md_prefix(snake_case_ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
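# Illustrative note (an addition): for a repository containing ``maths/prime_check.py``
# and ``maths/series/geometric.py``, the script above would print markdown such as
#
#   ## Maths
#     * [Prime Check](maths/prime_check.py)
#     * Series
#       * [Geometric](maths/series/geometric.py)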
| 106
|
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __A ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model(self ):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )  # J'aime le camembert !"
        output = model(input_ids )["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.02_54, 0.02_35, 0.10_27], [0.06_06, -0.18_11, -0.04_18], [-0.15_61, -0.11_27, 0.26_87]]] , dtype=tf.float32 , )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
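# Illustrative sketch (an addition, not part of the test): the hard-coded ids above
# encode the sentence "J'aime le camembert !"; they could equally be produced with a
# tokenizer (the "camembert-base" tokenizer checkpoint is an assumption here):
#
#     from transformers import CamembertTokenizer
#
#     tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
#     input_ids = tokenizer("J'aime le camembert !", return_tensors="tf").input_ids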
| 106
| 1
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds( train_file , eval_file , test_file , tokenizer , label_column_id , max_seq_length = None , ):
    '''simple docstring'''
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset('''csv''' , data_files=files )
    features_name = list(ds[list(files.keys() )[0]].features.keys() )
    label_name = features_name.pop(label_column_id )
    label_list = list(set(ds[list(files.keys() )[0]][label_name] ) )
    label2id = {label: i for i, label in enumerate(label_list )}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name ) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]] , truncation=True , max_length=max_seq_length , padding='''max_length''' ) , batched=True , )
    elif len(features_name ) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]) , truncation=True , max_length=max_seq_length , padding='''max_length''' , ) , batched=True , )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test , ({k: tf.int32 for k in input_names}, tf.int64) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments :
    '''simple docstring'''
    label_column_id: int = field(metadata={"""help""": """Which column contains the label"""} )
    train_file: str = field(default=None , metadata={"""help""": """The path of the training file"""} )
    dev_file: Optional[str] = field(default=None , metadata={"""help""": """The path of the development file"""} )
    test_file: Optional[str] = field(default=None , metadata={"""help""": """The path of the test file"""} )
    max_seq_length: int = field(
        default=1_2_8 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
@dataclass
class ModelArguments :
    '''simple docstring'''
    model_name_or_path: str = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    config_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    tokenizer_name: Optional[str] = field(
        default=None , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    use_fast: bool = field(default=False , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
def main( ):
    '''simple docstring'''
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
    model_args , data_args , training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
    train_dataset , eval_dataset , test_ds , label2id = get_tfds(
        train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=tokenizer , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(label2id ) , label2id=label2id , id2label={id: label for label, id in label2id.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=config , cache_dir=model_args.cache_dir , )
    def compute_metrics(p: EvalPrediction ) -> Dict:
        preds = np.argmax(p.predictions , axis=1 )
        return {"acc": (preds == p.label_ids).mean()}
    # Initialize our Trainer
    trainer = TFTrainer(
        model=model , args=training_args , train_dataset=train_dataset , eval_dataset=eval_dataset , compute_metrics=compute_metrics , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info('''*** Evaluate ***''' )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir , '''eval_results.txt''' )
        with open(output_eval_file , '''w''' ) as writer:
            logger.info('''***** Eval results *****''' )
            for key, value in result.items():
                logger.info(f""" {key} = {value}""" )
                writer.write(f"""{key} = {value}\n""" )
            results.update(result )
return results
if __name__ == "__main__":
main()
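# Illustrative invocation (an addition; the script name and file paths are placeholders,
# the flags come from the dataclasses above):
#
#     python run_tf_text_classification.py \
#         --model_name_or_path bert-base-uncased \
#         --train_file train.csv \
#         --dev_file dev.csv \
#         --label_column_id 0 \
#         --output_dir ./model \
#         --do_train \
#         --do_eval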
| 43
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline ( DiffusionPipeline ):
    '''simple docstring'''
    _optional_components = ["""melgan"""]
    def __init__( self , notes_encoder , continuous_encoder , decoder , scheduler , melgan , ) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1E-5) # Matches MelGAN training.
        self.max_value = 4.0 # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
    def scale_features( self , features , output_range=(-1.0, 1.0) , clip=False) -> Dict:
        min_out , max_out = output_range
        if clip:
            features = torch.clip(features , self.min_value , self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features( self , outputs , input_range=(-1.0, 1.0) , clip=False) -> Optional[int]:
        min_out , max_out = input_range
        outputs = torch.clip(outputs , min_out , max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
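    # Worked example (an addition, not part of the original pipeline): with
    # min_value = log(1e-5) ~= -11.5129 and max_value = 4.0, scale_features maps
    # 4.0 -> +1.0 and log(1e-5) -> -1.0 under output_range=(-1.0, 1.0), and
    # scale_to_features is the exact inverse, so
    # scale_to_features(scale_features(x)) == x for any x inside the clip range.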
    def encode( self , input_tokens , continuous_inputs , continuous_mask) -> List[Any]:
        tokens_mask = input_tokens > 0
        tokens_encoded , tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens , encoder_inputs_mask=tokens_mask)
        continuous_encoded , continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs , encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode( self , encodings_and_masks , input_tokens , noise_time) -> str:
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks , decoder_input_tokens=input_tokens , decoder_noise_time=timesteps)
        return logits
    @torch.no_grad()
    def __call__( self , input_tokens , generator = None , num_inference_steps = 100 , return_dict = True , output_type = "numpy" , callback = None , callback_steps = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps , int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}.""")
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims] , np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device , dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=bool , device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs , output_range=[-1.0, 1.0] , clip=True)
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device) , continuous_inputs=encoder_continuous_inputs , continuous_mask=encoder_continuous_mask , )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=generator , device=self.device , dtype=self.decoder.dtype , )
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks , input_tokens=x , noise_time=t / self.scheduler.config.num_train_timesteps , )
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output , t , x , generator=generator).prev_sample
            mel = self.scale_to_features(x , input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i , full_pred_mel)
            logger.info('''Generated segment''' , i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                '''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''')
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                '''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''')
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
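# Illustrative usage sketch (an addition; the public pipeline alias, checkpoint name and
# MidiProcessor helper are assumptions based on the released
# "google/music-spectrogram-diffusion" checkpoint):
#
#     from diffusers import SpectrogramDiffusionPipeline
#     from diffusers.pipelines.spectrogram_diffusion import MidiProcessor
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     processor = MidiProcessor()
#     output = pipe(processor("beethoven_hammerklavier_2.mid"))
#     audio = output.audios[0]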
| 43
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import _LazyModule
_import_structure = {'''tokenization_tapex''': ['''TapexTokenizer''']}
if TYPE_CHECKING:
from .tokenization_tapex import TapexTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
| 361
|
"""simple docstring"""
def solution(n: int = 10_00 ):
    """simple docstring"""
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(F'''{solution() = }''')
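# Worked example (an addition): the recurrence above generates the sqrt(2) convergents
# 3/2, 7/5, 17/12, 41/29, 99/70, 239/169, 577/408, 1393/985, ...
# The eighth expansion, 1393/985, is the first whose numerator (4 digits) has more
# digits than its denominator (3 digits), so solution(8) == 1.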
| 132
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
'''
def downscale_height_and_width( height , width , scale_factor=8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
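# Worked example (an addition): with the default scale_factor=8, a requested height of
# 768 gives new_height = 768 // 64 = 12 and the function returns 12 * 8 = 96; that value
# is used as the latent height below, and the movq decoder upsamples by the same factor
# to recover the full-resolution image.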
class KandinskyV22ControlnetPipeline ( DiffusionPipeline ):
    def __init__( self , unet: UNetaDConditionModel , scheduler: DDPMScheduler , movq: VQModel , ) -> Optional[int]:
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ) -> Optional[Any]:
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ) -> int:
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""" )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ) -> Dict:
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
        device = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ) -> Optional[Any]:
        if not hasattr(self.unet , """_hf_hook""" ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module , """_hf_hook""" )
                and hasattr(module._hf_hook , """execution_device""" )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]] , hint: torch.FloatTensor , height: int = 5_12 , width: int = 5_12 , num_inference_steps: int = 1_00 , guidance_scale: float = 4.0 , num_images_per_prompt: int = 1 , generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None , latents: Optional[torch.FloatTensor] = None , output_type: Optional[str] = "pil" , return_dict: bool = True , ) -> Dict:
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {"""image_embeds""": image_embeds, """hint""": hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , """variance_type""" )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 70
|
import argparse
import fairseq
import torch
from torch import nn
from transformers import (
MBartaaTokenizer,
MBartConfig,
MBartForCausalLM,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
for attribute in key.split('.' ):
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ = getattr(UpperCamelCase_ , UpperCamelCase_ ).shape
else:
SCREAMING_SNAKE_CASE__ = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ = value
else:
SCREAMING_SNAKE_CASE__ = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ = hf_model.feature_extractor
SCREAMING_SNAKE_CASE__ = hf_model.adapter
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , hf_model.config.feat_extract_norm == 'group' , )
SCREAMING_SNAKE_CASE__ = True
elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ):
load_adapter(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
SCREAMING_SNAKE_CASE__ = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ = name.split(UpperCamelCase_ )[0].split('.' )[-2]
SCREAMING_SNAKE_CASE__ = mapped_key.replace('*' , UpperCamelCase_ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ = 'weight_g'
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ = 'weight_v'
elif "bias" in name:
SCREAMING_SNAKE_CASE__ = 'bias'
elif "weight" in name:
SCREAMING_SNAKE_CASE__ = 'weight'
else:
SCREAMING_SNAKE_CASE__ = None
set_recursively(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
continue
if not is_used:
unused_weights.append(UpperCamelCase_ )
logger.warning(F'Unused weights: {unused_weights}' )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = full_name.split('conv_layers.' )[-1]
SCREAMING_SNAKE_CASE__ = name.split('.' )
SCREAMING_SNAKE_CASE__ = int(items[0] )
SCREAMING_SNAKE_CASE__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
SCREAMING_SNAKE_CASE__ = value
            logger.info(F'Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(UpperCamelCase_ )
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = full_name.split('adaptor.' )[-1]
SCREAMING_SNAKE_CASE__ = name.split('.' )
if items[1].isdigit():
SCREAMING_SNAKE_CASE__ = int(items[1] )
else:
SCREAMING_SNAKE_CASE__ = None
if "adaptor" not in full_name:
if "proj_ln" in full_name:
# has to be layer norm
if "bias" in name:
assert (
value.shape == adapter.proj_layer_norm.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter proj layer norm bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj_layer_norm.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
else:
# has to be projection layer
if "bias" in name:
assert (
value.shape == adapter.proj.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter proj layer bias was initialized from {full_name}.' )
if "weight" in name:
assert (
value.shape == adapter.proj.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter proj layer weight was initialized from {full_name}.' )
elif isinstance(UpperCamelCase_ , UpperCamelCase_ ):
if "bias" in name:
assert (
value.shape == adapter.layers[layer_id].conv.bias.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
logger.info(F'Adapter layer {layer_id} bias was initialized from {full_name}.' )
elif "weight" in name:
assert (
value.shape == adapter.layers[layer_id].conv.weight.data.shape
), F'{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.'
SCREAMING_SNAKE_CASE__ = value
            logger.info(F'Adapter layer {layer_id} weight was initialized from {full_name}.' )
else:
unused_weights.append(UpperCamelCase_ )
def make_linear_from_emb( emb ) -> Any:
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
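# Illustrative note (an addition): the helper above is the usual weight-tying trick; the
# resulting linear layer shares its parameter storage with the embedding matrix:
#
#     emb = nn.Embedding(10 , 4)          # vocab 10, hidden size 4
#     lm_head = make_linear_from_emb(emb)
#     assert lm_head.weight.data_ptr() == emb.weight.data_ptr()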
@torch.no_grad()
def _lowercase ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = WavaVecaConfig.from_pretrained(
UpperCamelCase_ , add_adapter=UpperCamelCase_ , adapter_stride=UpperCamelCase_ , adapter_kernel_size=UpperCamelCase_ , use_auth_token=UpperCamelCase_ , output_hidden_size=UpperCamelCase_ , )
SCREAMING_SNAKE_CASE__ = MBartConfig.from_pretrained(UpperCamelCase_ )
# load model
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={
'config_yaml': config_yaml_path,
'data': '/'.join(dict_path.split('/' )[:-1] ),
'w2v_path': checkpoint_path,
'load_pretrained_decoder_from': None,
} , )
SCREAMING_SNAKE_CASE__ = model[0].eval()
# load feature extractor
SCREAMING_SNAKE_CASE__ = WavaVecaFeatureExtractor.from_pretrained(UpperCamelCase_ , use_auth_token=UpperCamelCase_ )
# set weights for wav2vec2 encoder
SCREAMING_SNAKE_CASE__ = WavaVecaModel(UpperCamelCase_ )
recursively_load_weights_wavaveca(model.encoder , UpperCamelCase_ )
# load decoder weights
SCREAMING_SNAKE_CASE__ = MBartForCausalLM(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCamelCase_ )
logger.warning(F'The following keys are missing when loading the decoder weights: {missing_keys}' )
logger.warning(F'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' )
SCREAMING_SNAKE_CASE__ = SpeechEncoderDecoderModel(encoder=UpperCamelCase_ , decoder=UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = MBartaaTokenizer(UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
SCREAMING_SNAKE_CASE__ = hf_wavavec.config.to_dict()
SCREAMING_SNAKE_CASE__ = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = 'mbart50'
SCREAMING_SNAKE_CASE__ = 'wav2vec2'
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = 250004
SCREAMING_SNAKE_CASE__ = tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ = SpeechEncoderDecoderConfig.from_dict(UpperCamelCase_ )
hf_wavavec.save_pretrained(UpperCamelCase_ )
feature_extractor.save_pretrained(UpperCamelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_yaml_path""", default=None, type=str, help="""Path to yaml file of fine-tuned model""")
parser.add_argument(
"""--encoder_config_path""",
default="""facebook/wav2vec2-xls-r-1b""",
type=str,
help="""Path to hf encoder wav2vec2 checkpoint config""",
)
parser.add_argument(
"""--decoder_config_path""",
default="""facebook/mbart-large-50-one-to-many-mmt""",
type=str,
help="""Path to hf decoder checkpoint config""",
)
parser.add_argument("""--add_adapter""", default=True, type=bool, help="""whethere to add model adapter layers""")
parser.add_argument("""--adapter_stride""", default=2, type=int, help="""stride of adapter layers""")
parser.add_argument("""--adapter_kernel_size""", default=3, type=int, help="""kernel size of adapter layers""")
parser.add_argument("""--encoder_output_dim""", default=10_24, type=int, help="""encoder output dim""")
parser.add_argument("""--start_token_id""", default=25_00_04, type=int, help="""`decoder_start_token_id` of model config""")
    args = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
args.config_yaml_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
add_adapter=args.add_adapter,
adapter_kernel_size=args.adapter_kernel_size,
adapter_stride=args.adapter_stride,
decoder_start_token_id=args.start_token_id,
encoder_output_dim=args.encoder_output_dim,
)
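# Illustrative invocation (an addition; the script name and all paths are placeholders):
#
#     python convert_wav2vec2_mbart_checkpoint.py \
#         --checkpoint_path ./checkpoint_best.pt \
#         --config_yaml_path ./config.yaml \
#         --dict_path ./dict.mbart50.txt \
#         --pytorch_dump_folder_path ./wav2vec2-mbart50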
| 176
| 0
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
lowercase__ = {
"facebook/m2m100_418M": 1024,
}
# fmt: off
lowercase__ = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class A_ ( _snake_case ):
'''simple docstring'''
UpperCAmelCase_ : List[str] = VOCAB_FILES_NAMES
UpperCAmelCase_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ : Any = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ : int = ["""input_ids""", """attention_mask"""]
UpperCAmelCase_ : List[int] = []
UpperCAmelCase_ : List[int] = []
def __init__( self : List[Any] , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : int=None , lowercase_ : Any=None , lowercase_ : Optional[Any]="<s>" , lowercase_ : Tuple="</s>" , lowercase_ : List[str]="</s>" , lowercase_ : Optional[Any]="<pad>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Optional[Any]="m2m100" , lowercase_ : Optional[Dict[str, Any]] = None , lowercase_ : List[str]=8 , **lowercase_ : Tuple , ) -> None:
UpperCAmelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase : Any = language_codes
UpperCAmelCase : Any = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCAmelCase : List[Any] = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
UpperCAmelCase : Optional[int] = kwargs.get('additional_special_tokens' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(lowercase_ )
for lang_code in fairseq_language_code
if self.get_lang_token(lowercase_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=lowercase_ , tgt_lang=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , sep_token=lowercase_ , unk_token=lowercase_ , pad_token=lowercase_ , language_codes=lowercase_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=lowercase_ , **lowercase_ , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : Union[str, Any] = load_json(lowercase_ )
UpperCAmelCase : Optional[int] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase : Dict = spm_file
UpperCAmelCase : Union[str, Any] = load_spm(lowercase_ , self.sp_model_kwargs )
UpperCAmelCase : Optional[Any] = len(self.encoder )
UpperCAmelCase : str = {
self.get_lang_token(lowercase_ ): self.encoder_size + i for i, lang_code in enumerate(lowercase_ )
}
UpperCAmelCase : str = {lang_code: self.encoder_size + i for i, lang_code in enumerate(lowercase_ )}
UpperCAmelCase : str = {v: k for k, v in self.lang_token_to_id.items()}
UpperCAmelCase : int = src_lang if src_lang is not None else 'en'
UpperCAmelCase : List[Any] = tgt_lang
UpperCAmelCase : str = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCAmelCase : Any = num_madeup_words
@property
def UpperCAmelCase_ ( self : Optional[int] ) -> int:
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def UpperCAmelCase_ ( self : Tuple ) -> str:
return self._src_lang
@src_lang.setter
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : str ) -> None:
UpperCAmelCase : List[str] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase_ ( self : Tuple , lowercase_ : str ) -> List[str]:
return self.sp_model.encode(lowercase_ , out_type=lowercase_ )
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : Optional[Any] ) -> Dict:
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(lowercase_ , self.encoder[self.unk_token] )
def UpperCAmelCase_ ( self : Union[str, Any] , lowercase_ : int ) -> str:
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(lowercase_ , self.unk_token )
def UpperCAmelCase_ ( self : Any , lowercase_ : str ) -> List[str]:
UpperCAmelCase : Union[str, Any] = []
UpperCAmelCase : Dict = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(lowercase_ ) + token
UpperCAmelCase : Optional[int] = []
else:
current_sub_tokens.append(lowercase_ )
out_string += self.sp_model.decode(lowercase_ )
return out_string.strip()
def UpperCAmelCase_ ( self : List[str] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ )
UpperCAmelCase : str = [1] * len(self.prefix_tokens )
UpperCAmelCase : str = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase_ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase_ )) + ([0] * len(lowercase_ )) + suffix_ones
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase_ ( self : Dict ) -> Dict:
UpperCAmelCase : Any = {self.convert_ids_to_tokens(lowercase_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Dict ) -> Dict:
UpperCAmelCase : Tuple = self.__dict__.copy()
UpperCAmelCase : str = None
return state
def __setstate__( self : int , lowercase_ : Dict ) -> None:
UpperCAmelCase : Tuple = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCAmelCase : Union[str, Any] = {}
UpperCAmelCase : Tuple = load_spm(self.spm_file , self.sp_model_kwargs )
def UpperCAmelCase_ ( self : Dict , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]:
UpperCAmelCase : List[Any] = Path(lowercase_ )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
UpperCAmelCase : Optional[Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
)
UpperCAmelCase : List[Any] = save_dir / (
(filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
)
save_json(self.encoder , lowercase_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(lowercase_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , lowercase_ )
elif not os.path.isfile(self.spm_file ):
with open(lowercase_ , 'wb' ) as fi:
UpperCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(lowercase_ )
return (str(lowercase_ ), str(lowercase_ ))
def UpperCAmelCase_ ( self : Dict , lowercase_ : List[str] , lowercase_ : str = "en" , lowercase_ : Optional[List[str]] = None , lowercase_ : str = "ro" , **lowercase_ : Tuple , ) -> BatchEncoding:
UpperCAmelCase : List[str] = src_lang
UpperCAmelCase : Dict = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(lowercase_ , lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[str] , lowercase_ : Optional[str] , **lowercase_ : str ) -> Union[str, Any]:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
UpperCAmelCase : List[Any] = src_lang
UpperCAmelCase : int = self(lowercase_ , add_special_tokens=lowercase_ , **lowercase_ )
UpperCAmelCase : Tuple = self.get_lang_id(lowercase_ )
UpperCAmelCase : str = tgt_lang_id
return inputs
def UpperCAmelCase_ ( self : List[Any] ) -> Tuple:
self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase_ ( self : int ) -> int:
self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase_ ( self : int , lowercase_ : str ) -> None:
UpperCAmelCase : Optional[Any] = self.get_lang_token(lowercase_ )
UpperCAmelCase : Tuple = self.lang_token_to_id[lang_token]
UpperCAmelCase : int = [self.cur_lang_id]
UpperCAmelCase : Any = [self.eos_token_id]
def UpperCAmelCase_ ( self : List[Any] , lowercase_ : str ) -> None:
UpperCAmelCase : Any = self.get_lang_token(lowercase_ )
UpperCAmelCase : int = self.lang_token_to_id[lang_token]
UpperCAmelCase : int = [self.cur_lang_id]
UpperCAmelCase : List[str] = [self.eos_token_id]
def UpperCAmelCase_ ( self : str , lowercase_ : str ) -> str:
return self.lang_code_to_token[lang]
def UpperCAmelCase_ ( self : Any , lowercase_ : str ) -> int:
UpperCAmelCase : str = self.get_lang_token(lowercase_ )
return self.lang_token_to_id[lang_token]
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : Optional[int] = sentencepiece.SentencePieceProcessor(**UpperCAmelCase_ )
spm.Load(str(UpperCAmelCase_ ) )
return spm
def UpperCamelCase( UpperCAmelCase_ ):
with open(UpperCAmelCase_ , 'r' ) as f:
return json.load(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
with open(UpperCAmelCase_ , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ , indent=2 )
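# Minimal usage sketch for the tokenizer defined above, written against the
# published Transformers API (class and method names follow the library, not
# the obfuscated identifiers in this file):
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained('facebook/m2m100_418M', src_lang='en', tgt_lang='fr')
#   batch = tok('Hello world', return_tensors='pt')
#   forced_bos = tok.get_lang_id('fr')  # pass as `forced_bos_token_id` to model.generate()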
| 280
|
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class A_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , lowercase_ : str , lowercase_ : Union[str, Any]=7 , lowercase_ : Union[str, Any]=3 , lowercase_ : int=30 , lowercase_ : Tuple=400 , lowercase_ : Tuple=True , lowercase_ : Optional[int]=None , lowercase_ : List[str]=0.9 , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=True , lowercase_ : int=[0.5, 0.5, 0.5] , lowercase_ : List[str]=[0.5, 0.5, 0.5] , ) -> Tuple:
UpperCAmelCase : Optional[int] = size if size is not None else {'shortest_edge': 30}
UpperCAmelCase : int = crop_size if crop_size is not None else {'height': 30, 'width': 30}
UpperCAmelCase : Tuple = parent
UpperCAmelCase : Optional[Any] = batch_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : int = min_resolution
UpperCAmelCase : Optional[int] = max_resolution
UpperCAmelCase : str = do_resize_and_center_crop
UpperCAmelCase : int = size
UpperCAmelCase : Dict = crop_pct
UpperCAmelCase : Union[str, Any] = crop_size
UpperCAmelCase : Optional[int] = do_normalize
UpperCAmelCase : Optional[Any] = image_mean
UpperCAmelCase : Optional[Any] = image_std
def UpperCAmelCase_ ( self : str ) -> int:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class A_ ( _snake_case , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = PoolFormerImageProcessor if is_vision_available() else None
def UpperCAmelCase_ ( self : Dict ) -> str:
UpperCAmelCase : Any = PoolFormerImageProcessingTester(self )
@property
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ ( self : Tuple ) -> str:
UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(lowercase_ , 'size' ) )
self.assertTrue(hasattr(lowercase_ , 'crop_pct' ) )
self.assertTrue(hasattr(lowercase_ , 'do_normalize' ) )
self.assertTrue(hasattr(lowercase_ , 'image_mean' ) )
self.assertTrue(hasattr(lowercase_ , 'image_std' ) )
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 30} )
self.assertEqual(image_processor.crop_size , {'height': 30, 'width': 30} )
UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def UpperCAmelCase_ ( self : Union[str, Any] ) -> str:
pass
def UpperCAmelCase_ ( self : List[Any] ) -> List[str]:
# Initialize image_processing
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image )
# Test not batched input
UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : str = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self : List[Any] ) -> Dict:
# Initialize image_processing
UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray )
# Test not batched input
UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : Optional[Any] = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def UpperCAmelCase_ ( self : str ) -> Dict:
# Initialize image_processing
UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ )
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor )
# Test not batched input
UpperCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
UpperCAmelCase : Optional[int] = image_processing(lowercase_ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
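# Hedged sketch of exercising the processor outside the test harness, assuming
# the image-processor era API that the dict-valued `size`/`crop_size` above imply:
#
#   import numpy as np
#   from PIL import Image
#   from transformers import PoolFormerImageProcessor
#   proc = PoolFormerImageProcessor(size={'shortest_edge': 30}, crop_size={'height': 30, 'width': 30})
#   img = Image.fromarray((np.random.rand(40, 50, 3) * 255).astype('uint8'))
#   proc(img, return_tensors='pt').pixel_values.shape  # -> (1, 3, 30, 30)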
| 280
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Tuple = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
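# With this _LazyModule setup, importing the package is cheap and the
# torch-dependent module is only loaded on attribute access, e.g.:
#
#   import transformers.models.sew as sew   # no torch import yet
#   model_cls = sew.SEWModel                # triggers the real modeling import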
| 48
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ : Optional[Any] = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
SCREAMING_SNAKE_CASE__ : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48
| 1
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase__ ( _lowercase ):
'''simple docstring'''
return (data["data"], data["target"])
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : str = XGBClassifier()
classifier.fit(_lowercase , _lowercase )
return classifier
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = load_iris()
UpperCAmelCase_, UpperCAmelCase_ : str = data_handling(_lowercase )
UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_, UpperCAmelCase_ : Dict = train_test_split(
_lowercase , _lowercase , test_size=0.25 )
UpperCAmelCase_ : int = iris['''target_names''']
# Create an XGBoost Classifier from the training data
UpperCAmelCase_ : int = xgboost(_lowercase , _lowercase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_lowercase , _lowercase , _lowercase , display_labels=_lowercase , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
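# Hedged follow-up sketch: once `xgboost(...)` above returns a fitted
# XGBClassifier, held-out predictions are simply:
#
#   preds = classifier.predict(x_test)   # integer class indices into iris['target_names']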
| 235
|
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase__ ( _lowercase , _lowercase ):
'''simple docstring'''
UpperCAmelCase_ : Union[str, Any] = int(_lowercase )
assert noofclusters < len(_lowercase )
# Find out the dimensionality
UpperCAmelCase_ : Optional[int] = len(vectors[0] )
# Will help select random centroids from among the available vectors
UpperCAmelCase_ : Optional[int] = list(range(len(_lowercase ) ) )
shuffle(_lowercase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
UpperCAmelCase_ : List[str] = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
UpperCAmelCase_ : Tuple = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
UpperCAmelCase_ : str = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(_lowercase )
]
##These nodes will assign the centroid Variables the appropriate
##values
UpperCAmelCase_ : List[Any] = tf.placeholder('''float64''' , [dim] )
UpperCAmelCase_ : List[Any] = []
for centroid in centroids:
cent_assigns.append(tf.assign(_lowercase , _lowercase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
UpperCAmelCase_ : Optional[int] = [tf.Variable(0 ) for i in range(len(_lowercase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
UpperCAmelCase_ : List[Any] = tf.placeholder('''int32''' )
UpperCAmelCase_ : Union[str, Any] = []
for assignment in assignments:
cluster_assigns.append(tf.assign(_lowercase , _lowercase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
UpperCAmelCase_ : Tuple = tf.placeholder('''float''' , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
UpperCAmelCase_ : str = tf.reduce_mean(_lowercase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
UpperCAmelCase_ : List[Any] = tf.placeholder('''float''' , [dim] )
UpperCAmelCase_ : Tuple = tf.placeholder('''float''' , [dim] )
UpperCAmelCase_ : Optional[Any] = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(_lowercase , _lowercase ) , 2 ) ) ) # tf.sub was renamed tf.subtract in TF 1.0
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
UpperCAmelCase_ : int = tf.placeholder('''float''' , [noofclusters] )
UpperCAmelCase_ : Any = tf.argmin(_lowercase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
UpperCAmelCase_ : List[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(_lowercase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
UpperCAmelCase_ : Tuple = 100
for _ in range(_lowercase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(_lowercase ) ):
UpperCAmelCase_ : Optional[int] = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
# 'centroid_distances', since that is the input to the
# cluster assignment node.
UpperCAmelCase_ : Tuple = [
sess.run(_lowercase , feed_dict={va: vect, va: sess.run(_lowercase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
UpperCAmelCase_ : Dict = sess.run(
_lowercase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(_lowercase ):
# Collect all the vectors assigned to this cluster
UpperCAmelCase_ : List[str] = [
vectors[i]
for i in range(len(_lowercase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
UpperCAmelCase_ : Any = sess.run(
_lowercase , feed_dict={mean_input: array(_lowercase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
UpperCAmelCase_ : Optional[int] = sess.run(_lowercase )
UpperCAmelCase_ : int = sess.run(_lowercase )
return centroids, assignments
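# Hedged usage sketch for the TF1-style k-means above (`tf1_kmeans` is a
# placeholder for the obfuscated def name; under TF 2.x the body would also
# need `tf.compat.v1` with eager execution disabled):
#
#   vectors = array([[1.0, 1.0], [1.5, 2.0], [8.0, 8.0], [9.0, 9.5]])
#   centroids, assignments = tf1_kmeans(vectors, 2)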
| 235
| 1
|
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
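# Hedged usage note: with torch and transformers installed, the guarded imports
# above expose the pipelines directly, e.g.:
#
#   from diffusers.pipelines.controlnet import StableDiffusionControlNetPipeline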
| 106
|
"""simple docstring"""
__UpperCamelCase : Optional[Any] = [
9_9_9,
8_0_0,
7_9_9,
6_0_0,
5_9_9,
5_0_0,
4_0_0,
3_9_9,
3_7_7,
3_5_5,
3_3_3,
3_1_1,
2_8_8,
2_6_6,
2_4_4,
2_2_2,
2_0_0,
1_9_9,
1_7_7,
1_5_5,
1_3_3,
1_1_1,
8_8,
6_6,
4_4,
2_2,
0,
]
__UpperCamelCase : Tuple = [
9_9_9,
9_7_6,
9_5_2,
9_2_8,
9_0_5,
8_8_2,
8_5_8,
8_5_7,
8_1_0,
7_6_2,
7_1_5,
7_1_4,
5_7_2,
4_2_9,
4_2_8,
2_8_6,
2_8_5,
2_3_8,
1_9_0,
1_4_3,
1_4_2,
1_1_8,
9_5,
7_1,
4_7,
2_4,
0,
]
__UpperCamelCase : Optional[int] = [
9_9_9,
9_8_8,
9_7_7,
9_6_6,
9_5_5,
9_4_4,
9_3_3,
9_2_2,
9_1_1,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_5_0,
3_0_0,
2_9_9,
2_6_6,
2_3_3,
2_0_0,
1_9_9,
1_7_9,
1_5_9,
1_4_0,
1_2_0,
1_0_0,
9_9,
8_8,
7_7,
6_6,
5_5,
4_4,
3_3,
2_2,
1_1,
0,
]
__UpperCamelCase : Tuple = [
9_9_9,
9_9_5,
9_9_2,
9_8_9,
9_8_5,
9_8_1,
9_7_8,
9_7_5,
9_7_1,
9_6_7,
9_6_4,
9_6_1,
9_5_7,
9_5_6,
9_5_1,
9_4_7,
9_4_2,
9_3_7,
9_3_3,
9_2_8,
9_2_3,
9_1_9,
9_1_4,
9_1_3,
9_0_8,
9_0_3,
8_9_7,
8_9_2,
8_8_7,
8_8_1,
8_7_6,
8_7_1,
8_7_0,
8_6_4,
8_5_8,
8_5_2,
8_4_6,
8_4_0,
8_3_4,
8_2_8,
8_2_7,
8_2_0,
8_1_3,
8_0_6,
7_9_9,
7_9_2,
7_8_5,
7_8_4,
7_7_7,
7_7_0,
7_6_3,
7_5_6,
7_4_9,
7_4_2,
7_4_1,
7_3_3,
7_2_4,
7_1_6,
7_0_7,
6_9_9,
6_9_8,
6_8_8,
6_7_7,
6_6_6,
6_5_6,
6_5_5,
6_4_5,
6_3_4,
6_2_3,
6_1_3,
6_1_2,
5_9_8,
5_8_4,
5_7_0,
5_6_9,
5_5_5,
5_4_1,
5_2_7,
5_2_6,
5_0_5,
4_8_4,
4_8_3,
4_6_2,
4_4_0,
4_3_9,
3_9_6,
3_9_5,
3_5_2,
3_5_1,
3_0_8,
3_0_7,
2_6_4,
2_6_3,
2_2_0,
2_1_9,
1_7_6,
1_3_2,
8_8,
4_4,
0,
]
__UpperCamelCase : str = [
9_9_9,
9_9_7,
9_9_5,
9_9_2,
9_9_0,
9_8_8,
9_8_6,
9_8_4,
9_8_1,
9_7_9,
9_7_7,
9_7_5,
9_7_2,
9_7_0,
9_6_8,
9_6_6,
9_6_4,
9_6_1,
9_5_9,
9_5_7,
9_5_6,
9_5_4,
9_5_1,
9_4_9,
9_4_6,
9_4_4,
9_4_1,
9_3_9,
9_3_6,
9_3_4,
9_3_1,
9_2_9,
9_2_6,
9_2_4,
9_2_1,
9_1_9,
9_1_6,
9_1_4,
9_1_3,
9_1_0,
9_0_7,
9_0_5,
9_0_2,
8_9_9,
8_9_6,
8_9_3,
8_9_1,
8_8_8,
8_8_5,
8_8_2,
8_7_9,
8_7_7,
8_7_4,
8_7_1,
8_7_0,
8_6_7,
8_6_4,
8_6_1,
8_5_8,
8_5_5,
8_5_2,
8_4_9,
8_4_6,
8_4_3,
8_4_0,
8_3_7,
8_3_4,
8_3_1,
8_2_8,
8_2_7,
8_2_4,
8_2_1,
8_1_7,
8_1_4,
8_1_1,
8_0_8,
8_0_4,
8_0_1,
7_9_8,
7_9_5,
7_9_1,
7_8_8,
7_8_5,
7_8_4,
7_8_0,
7_7_7,
7_7_4,
7_7_0,
7_6_6,
7_6_3,
7_6_0,
7_5_6,
7_5_2,
7_4_9,
7_4_6,
7_4_2,
7_4_1,
7_3_7,
7_3_3,
7_3_0,
7_2_6,
7_2_2,
7_1_8,
7_1_4,
7_1_0,
7_0_7,
7_0_3,
6_9_9,
6_9_8,
6_9_4,
6_9_0,
6_8_5,
6_8_1,
6_7_7,
6_7_3,
6_6_9,
6_6_4,
6_6_0,
6_5_6,
6_5_5,
6_5_0,
6_4_6,
6_4_1,
6_3_6,
6_3_2,
6_2_7,
6_2_2,
6_1_8,
6_1_3,
6_1_2,
6_0_7,
6_0_2,
5_9_6,
5_9_1,
5_8_6,
5_8_0,
5_7_5,
5_7_0,
5_6_9,
5_6_3,
5_5_7,
5_5_1,
5_4_5,
5_3_9,
5_3_3,
5_2_7,
5_2_6,
5_1_9,
5_1_2,
5_0_5,
4_9_8,
4_9_1,
4_8_4,
4_8_3,
4_7_4,
4_6_6,
4_5_7,
4_4_9,
4_4_0,
4_3_9,
4_2_8,
4_1_8,
4_0_7,
3_9_6,
3_9_5,
3_8_1,
3_6_6,
3_5_2,
3_5_1,
3_3_0,
3_0_8,
3_0_7,
2_8_6,
2_6_4,
2_6_3,
2_4_2,
2_2_0,
2_1_9,
1_7_6,
1_7_5,
1_3_2,
1_3_1,
8_8,
4_4,
0,
]
__UpperCamelCase : Dict = [
9_9_9,
9_9_1,
9_8_2,
9_7_4,
9_6_6,
9_5_8,
9_5_0,
9_4_1,
9_3_3,
9_2_5,
9_1_6,
9_0_8,
9_0_0,
8_9_9,
8_7_4,
8_5_0,
8_2_5,
8_0_0,
7_9_9,
7_0_0,
6_0_0,
5_0_0,
4_0_0,
3_0_0,
2_0_0,
1_0_0,
0,
]
__UpperCamelCase : int = [
9_9_9,
9_9_2,
9_8_5,
9_7_8,
9_7_1,
9_6_4,
9_5_7,
9_4_9,
9_4_2,
9_3_5,
9_2_8,
9_2_1,
9_1_4,
9_0_7,
9_0_0,
8_9_9,
8_7_9,
8_5_9,
8_4_0,
8_2_0,
8_0_0,
7_9_9,
7_6_6,
7_3_3,
7_0_0,
6_9_9,
6_5_0,
6_0_0,
5_9_9,
5_0_0,
4_9_9,
4_0_0,
3_9_9,
3_0_0,
2_9_9,
2_0_0,
1_9_9,
1_0_0,
9_9,
0,
]
__UpperCamelCase : Optional[int] = [
9_9_9,
9_9_6,
9_9_2,
9_8_9,
9_8_5,
9_8_2,
9_7_9,
9_7_5,
9_7_2,
9_6_8,
9_6_5,
9_6_1,
9_5_8,
9_5_5,
9_5_1,
9_4_8,
9_4_4,
9_4_1,
9_3_8,
9_3_4,
9_3_1,
9_2_7,
9_2_4,
9_2_0,
9_1_7,
9_1_4,
9_1_0,
9_0_7,
9_0_3,
9_0_0,
8_9_9,
8_9_1,
8_8_4,
8_7_6,
8_6_9,
8_6_1,
8_5_3,
8_4_6,
8_3_8,
8_3_0,
8_2_3,
8_1_5,
8_0_8,
8_0_0,
7_9_9,
7_8_8,
7_7_7,
7_6_6,
7_5_5,
7_4_4,
7_3_3,
7_2_2,
7_1_1,
7_0_0,
6_9_9,
6_8_8,
6_7_7,
6_6_6,
6_5_5,
6_4_4,
6_3_3,
6_2_2,
6_1_1,
6_0_0,
5_9_9,
5_8_5,
5_7_1,
5_5_7,
5_4_2,
5_2_8,
5_1_4,
5_0_0,
4_9_9,
4_8_5,
4_7_1,
4_5_7,
4_4_2,
4_2_8,
4_1_4,
4_0_0,
3_9_9,
3_7_9,
3_5_9,
3_4_0,
3_2_0,
3_0_0,
2_9_9,
2_7_9,
2_5_9,
2_4_0,
2_2_0,
2_0_0,
1_9_9,
1_6_6,
1_3_3,
1_0_0,
9_9,
6_6,
3_3,
0,
]
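# Hedged note: the constants above read as precomputed diffusion inference
# timestep schedules (monotone descending from 999 to 0) at several step
# counts; the obfuscated names do not say which scheduler consumes them.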
| 106
| 1
|
'''simple docstring'''
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def __lowerCamelCase ( __snake_case : list, __snake_case : list, __snake_case : list, __snake_case : list, __snake_case : list ) -> float:
"""simple docstring"""
A__ : Dict =np.array([[1, item, train_mtch[i]] for i, item in enumerate(_UpperCamelCase )] )
A__ : Union[str, Any] =np.array(_UpperCamelCase )
A__ : Tuple =np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), _UpperCamelCase ) ), x.transpose() ), _UpperCamelCase )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] * beta[2] )  # prediction from the [1, date, match] design row
def __lowerCamelCase ( __snake_case : list, __snake_case : list, __snake_case : list ) -> float:
"""simple docstring"""
A__ : str =(1, 2, 1)
A__ : Any =(1, 1, 0, 7)
A__ : Union[str, Any] =SARIMAX(
_UpperCamelCase, exog=_UpperCamelCase, order=_UpperCamelCase, seasonal_order=_UpperCamelCase )
A__ : List[Any] =model.fit(disp=_UpperCamelCase, maxiter=600, method="""nm""" )
A__ : List[str] =model_fit.predict(1, len(_UpperCamelCase ), exog=[test_match] )
return result[0]
def __lowerCamelCase ( __snake_case : list, __snake_case : list, __snake_case : list ) -> float:
"""simple docstring"""
A__ : int =SVR(kernel="""rbf""", C=1, gamma=0.1, epsilon=0.1 )
regressor.fit(_UpperCamelCase, _UpperCamelCase )
A__ : str =regressor.predict(_UpperCamelCase )
return y_pred[0]
def __lowerCamelCase ( __snake_case : list ) -> float:
"""simple docstring"""
train_user.sort()
A__ : List[str] =np.percentile(_UpperCamelCase, 25 )  # first quartile, q1
A__ : List[Any] =np.percentile(_UpperCamelCase, 75 )  # third quartile, q3
A__ : Tuple =q3 - q1  # interquartile range
A__ : str =q1 - (iqr * 0.1)  # low_lim, the lower safety bound
return low_lim
def __lowerCamelCase ( __snake_case : list, __snake_case : float ) -> bool:
"""simple docstring"""
A__ : Tuple =0
A__ : Optional[int] =0
for i in list_vote:
if i > actual_result:
A__ : Optional[Any] =not_safe + 1
else:
if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
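# Worked example for the voting check above: with forecasts [2.0, 3.0, 4.0]
# against an actual value of 5.0, no vote exceeds the actual and none is within
# the 0.1 tolerance, so safe=0, not_safe=3 and the function returns False.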
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__snake_case : Dict = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__snake_case : int = pd.DataFrame(
data_input, columns=['total_user', 'total_even', 'days']
)
__snake_case : Union[str, Any] = Normalizer().fit_transform(data_input_df.values)
# split data
__snake_case : int = normalize_df[:, 2].tolist()
__snake_case : List[Any] = normalize_df[:, 0].tolist()
__snake_case : Optional[int] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__snake_case : List[Any] = normalize_df[:, [1, 2]].tolist()
__snake_case : str = x[: len(x) - 1]
__snake_case : Optional[int] = x[len(x) - 1 :]
# for linear regression & sarimax
__snake_case : Optional[Any] = total_date[: len(total_date) - 1]
__snake_case : List[str] = total_user[: len(total_user) - 1]
__snake_case : Dict = total_match[: len(total_match) - 1]
__snake_case : Union[str, Any] = total_date[len(total_date) - 1 :]
__snake_case : List[Any] = total_user[len(total_user) - 1 :]
__snake_case : Union[str, Any] = total_match[len(total_match) - 1 :]
# voting system with forecasting
__snake_case : Dict = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__snake_case : Union[str, Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
print(f'Today\'s data is {not_str}safe.')
| 359
|
'''simple docstring'''
from __future__ import annotations
def __lowerCamelCase ( __snake_case : list[int] ) -> bool:
"""simple docstring"""
return len(set(__snake_case ) ) == len(__snake_case )
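# Worked examples (the def name is obfuscated; the check is len(set(xs)) == len(xs)):
#   [1, 2, 3] -> True   (all elements distinct)
#   [1, 2, 2] -> False  (the duplicate collapses in the set)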
if __name__ == "__main__":
import doctest
doctest.testmod()
| 136
| 0
|
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
__snake_case = logging.get_logger(__name__)
def _A ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : int=False ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
if not is_sharded:
UpperCamelCase :Any = os.path.abspath(__lowerCAmelCase )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
UpperCamelCase :str = torch.load(__lowerCAmelCase , map_location='''cpu''' )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
UpperCamelCase :Dict = convert_pytorch_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCamelCase :str = convert_pytorch_sharded_state_dict_to_flax(__lowerCAmelCase , __lowerCAmelCase )
return flax_state_dict
def _A ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , ):
def is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> bool:
return len(set(__lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCamelCase :Dict = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCamelCase :Any = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCamelCase :Optional[Any] = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCamelCase :Tuple = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCamelCase :Union[str, Any] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
UpperCamelCase :int = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCamelCase :Union[str, Any] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__lowerCAmelCase ):
UpperCamelCase :str = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCamelCase :List[str] = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCamelCase :Any = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCamelCase :Optional[int] = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCamelCase :Tuple = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCamelCase :int = pt_tuple_key[-2] + """_v"""
if name is not None:
UpperCamelCase :Any = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
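# Hedged worked example for the renaming above: a PyTorch key
# ('encoder', 'layer_norm', 'weight') whose Flax counterpart exists under
# 'scale' becomes ('encoder', 'layer_norm', 'scale'); a 4-D conv 'weight' is
# transposed from PyTorch's (out_ch, in_ch, kH, kW) to Flax's
# (kH, kW, in_ch, out_ch) via transpose(2, 3, 1, 0).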
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
# convert pytorch tensor to numpy
UpperCamelCase :List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase :Any = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCamelCase :Optional[int] = flax_model.params["""params"""]
else:
UpperCamelCase :List[Any] = flax_model.params
UpperCamelCase :Union[str, Any] = flatten_dict(__lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase :Tuple = flatten_dict(flax_model.params['''batch_stats'''] )
random_flax_state_dict.update(__lowerCAmelCase )
UpperCamelCase :Dict = {}
UpperCamelCase :Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase :Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase :Optional[int] = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCamelCase :List[str] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase :List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase :Optional[Any] = rename_key_and_reshape_tensor(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# add model prefix if necessary
UpperCamelCase :str = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase :List[str] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCamelCase :Optional[Any] = jnp.asarray(__lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase :str = jnp.asarray(__lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase :Optional[Any] = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
def _A ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ):
import torch
# Load the index
UpperCamelCase :Union[str, Any] = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCamelCase :str = torch.load(__lowerCAmelCase )
UpperCamelCase :int = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCamelCase :str = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCamelCase :Tuple = flax_model.params["""params"""]
UpperCamelCase :List[str] = flatten_dict(__lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) )
else:
UpperCamelCase :Any = flax_model.params
UpperCamelCase :Dict = flatten_dict(__lowerCAmelCase )
UpperCamelCase :Optional[int] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
UpperCamelCase :Optional[int] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCamelCase :str = tuple(pt_key.split('''.''' ) )
# remove base model prefix if necessary
UpperCamelCase :Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase :str = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCamelCase :int = rename_key_and_reshape_tensor(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# add model prefix if necessary
UpperCamelCase :Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase :int = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCamelCase :str = jnp.asarray(__lowerCAmelCase )
continue
if "var" in flax_key[-1]:
UpperCamelCase :List[str] = jnp.asarray(__lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(__lowerCAmelCase , __lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
UpperCamelCase :str = jnp.asarray(__lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
UpperCamelCase :Any = jnp.asarray(__lowerCAmelCase )
return unflatten_dict(__lowerCAmelCase )
def _A ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
UpperCamelCase :List[Any] = os.path.abspath(__lowerCAmelCase )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
UpperCamelCase :int = getattr(__lowerCAmelCase , '''Flax''' + model.__class__.__name__ )
# load flax weight dict
with open(__lowerCAmelCase , '''rb''' ) as state_f:
try:
UpperCamelCase :int = from_bytes(__lowerCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(__lowerCAmelCase , __lowerCAmelCase )
def _A ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[int] ):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'''
''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'''
''' instructions.''' )
raise
# check if we have bf16 weights
UpperCamelCase :Optional[int] = flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE__ : x.dtype == jnp.bfloataa , __lowerCAmelCase ) ).values()
if any(__lowerCAmelCase ):
# convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'''Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '''
'''before loading those in PyTorch model.''' )
UpperCamelCase :str = jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __lowerCAmelCase )
UpperCamelCase :Union[str, Any] = flatten_dict(__lowerCAmelCase )
UpperCamelCase :int = pt_model.state_dict()
UpperCamelCase :str = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
UpperCamelCase :List[Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCamelCase :Optional[int] = []
UpperCamelCase :Tuple = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCamelCase :int = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCamelCase :Any = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCamelCase :Union[str, Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCamelCase :List[Any] = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__lowerCAmelCase ) not in pt_model_dict:
# conv layer
UpperCamelCase :List[Any] = flax_key_tuple[:-1] + ("""weight""",)
UpperCamelCase :Tuple = jnp.transpose(__lowerCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(__lowerCAmelCase ) not in pt_model_dict:
# linear layer
UpperCamelCase :Optional[int] = flax_key_tuple[:-1] + ("""weight""",)
UpperCamelCase :Tuple = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCamelCase :Tuple = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCamelCase :int = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
UpperCamelCase :List[str] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
UpperCamelCase :str = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCamelCase :Dict = """.""".join(__lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCamelCase :Optional[Any] = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCamelCase :List[str] = key.split('''.''' )
UpperCamelCase :Union[str, Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCamelCase :Tuple = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCamelCase :List[Any] = key_components[-2] + """_v"""
if name is not None:
UpperCamelCase :List[str] = key_components[:-3] + [name]
UpperCamelCase :str = """.""".join(__lowerCAmelCase )
UpperCamelCase :Optional[Any] = key
if flax_key in special_pt_names:
UpperCamelCase :Dict = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCamelCase :Dict = np.asarray(__lowerCAmelCase ) if not isinstance(__lowerCAmelCase , np.ndarray ) else flax_tensor
UpperCamelCase :Tuple = torch.from_numpy(__lowerCAmelCase )
# remove from missing keys
missing_keys.remove(__lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(__lowerCAmelCase )
pt_model.load_state_dict(__lowerCAmelCase )
# re-transform missing_keys to list
UpperCamelCase :Optional[int] = list(__lowerCAmelCase )
if len(__lowerCAmelCase ) > 0:
logger.warning(
'''Some weights of the Flax model were not used when initializing the PyTorch model'''
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'''
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'''
''' FlaxBertForSequenceClassification model).''' )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(__lowerCAmelCase ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
''' use it for predictions and inference.''' )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
'''If your task is similar to the task the model of the checkpoint was trained on, '''
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
| 259
|
"""simple docstring"""
def _lowercase ( ) -> int:
return [
a * b * (1000 - a - b)
for a in range(1 , 999 )
for b in range(a , 999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
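# This brute-forces Project Euler problem 9: the Pythagorean triplet with
# a + b + c = 1000 is (200, 375, 425), so the product returned is 31875000.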
if __name__ == "__main__":
print(f'{solution() = }')
| 132
| 0
|
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class snake_case_( unittest.TestCase ):
__UpperCamelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Tuple , UpperCamelCase_ : Dict ):
lowerCAmelCase : Any = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase : int = VideoClassificationPipeline(model=UpperCamelCase_ , image_processor=UpperCamelCase_ , top_k=2 )
lowerCAmelCase : Optional[Any] = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def lowerCamelCase__ ( self : Tuple , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Any ):
for example in examples:
lowerCAmelCase : Optional[int] = video_classifier(UpperCamelCase_ )
self.assertEqual(
UpperCamelCase_ , [
{'''score''': ANY(UpperCamelCase_ ), '''label''': ANY(UpperCamelCase_ )},
{'''score''': ANY(UpperCamelCase_ ), '''label''': ANY(UpperCamelCase_ )},
] , )
@require_torch
def lowerCamelCase__ ( self : Any ):
lowerCAmelCase : List[Any] = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
lowerCAmelCase : Tuple = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 1_0} , crop_size={'''height''': 1_0, '''width''': 1_0} )
lowerCAmelCase : int = pipeline(
'''video-classification''' , model=UpperCamelCase_ , feature_extractor=UpperCamelCase_ , frame_sampling_rate=4 )
lowerCAmelCase : int = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
lowerCAmelCase : Tuple = video_classifier(UpperCamelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCamelCase_ , decimals=4 ) , [{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}] , )
lowerCAmelCase : Any = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(UpperCamelCase_ , decimals=4 ) , [
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_199, '''label''': '''LABEL_0'''}, {'''score''': 0.4_801, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def lowerCamelCase__ ( self : Union[str, Any] ):
pass
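# Hedged end-to-end sketch mirroring the test above (the model id is the tiny
# test checkpoint referenced in this file; decord must be installed to decode
# the video, and 'archery.mp4' is a placeholder path):
#
#   from transformers import pipeline
#   clf = pipeline('video-classification', model='hf-internal-testing/tiny-random-VideoMAEForVideoClassification')
#   clf('archery.mp4', top_k=2)   # -> [{'score': ..., 'label': ...}, {'score': ..., 'label': ...}]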
| 352
|
"""simple docstring"""
def _snake_case ( _snake_case : int ):
assert isinstance(_snake_case , _snake_case ), f'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
lowerCAmelCase : Tuple = f'''The input value of [n={number}] has to be > 0'''
raise ValueError(_snake_case )
else:
lowerCAmelCase : str = sylvester(number - 1 )
lowerCAmelCase : Optional[Any] = num - 1
lowerCAmelCase : Optional[Any] = num
return lower * upper + 1
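# Worked values for the recurrence above (each term is a(n-1)**2 - a(n-1) + 1):
#   sylvester(1) = 2, sylvester(2) = 3, sylvester(3) = 7, sylvester(4) = 43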
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 314
| 0
|
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class _A:
"""simple docstring"""
def __init__( self , _A , ):
__A : Union[str, Any] = parent
__A : List[Any] = 13
__A : Optional[int] = 7
__A : Tuple = True
__A : str = True
__A : int = True
__A : Any = 99
__A : Tuple = 32
__A : Dict = 2
__A : List[str] = 4
__A : int = 37
__A : Union[str, Any] = 'gelu'
__A : List[Any] = 0.1
__A : List[str] = 0.1
__A : Dict = 512
__A : List[Any] = 16
__A : int = 2
__A : Optional[int] = 0.0_2
__A : str = 3
__A : Tuple = 4
__A : Dict = None
def UpperCAmelCase_ ( self ):
__A : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__A : int = None
if self.use_input_mask:
__A : Any = random_attention_mask([self.batch_size, self.seq_length] )
__A : str = None
__A : List[str] = None
__A : Tuple = None
if self.use_labels:
__A : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__A : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__A : Tuple = ids_tensor([self.batch_size] , self.num_choices )
__A : Tuple = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase_ ( self ):
( __A , __A , __A , __A , __A , __A ) : int = self.prepare_config_and_inputs()
__A : Optional[int] = True
__A : Any = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
__A : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A ):
__A : str = TFEsmModel(config=_A )
__A : str = {'input_ids': input_ids, 'attention_mask': input_mask}
__A : Dict = model(_A )
__A : List[str] = [input_ids, input_mask]
__A : str = model(_A )
__A : Dict = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A , _A , _A , ):
__A : int = True
__A : List[str] = TFEsmModel(config=_A )
__A : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
__A : Optional[Any] = model(_A )
__A : Optional[int] = [input_ids, input_mask]
__A : int = model(_A , encoder_hidden_states=_A )
# Also check the case where encoder outputs are not passed
__A : Union[str, Any] = model(_A , attention_mask=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A ):
__A : Optional[Any] = TFEsmForMaskedLM(config=_A )
__A : List[Any] = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase_ ( self , _A , _A , _A , _A , _A , _A ):
__A : str = self.num_labels
__A : Dict = TFEsmForTokenClassification(config=_A )
__A : List[str] = {'input_ids': input_ids, 'attention_mask': input_mask}
__A : Tuple = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase_ ( self ):
__A : List[Any] = self.prepare_config_and_inputs()
(
(
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) , (
__A
) ,
) : Dict = config_and_inputs
__A : int = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))

    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends with 1
CHAINS[57] = False  # the chain of 58 ends with 89
def chain(number: int) -> bool:
    """Return True if the chain of ``number`` ends with 1, False if it ends with 89."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """Count how many starting numbers below ``number`` produce a chain ending in 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be download even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help="Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine",
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
from collections.abc import Callable
class Heap:
    """A generic min-heap (ordered by ``key(item_value)``) that supports
    updating and deleting arbitrary items via an index map."""

    def __init__(self, key: Callable | None = None) -> None:
        # Stores actual heap items.
        self.arr: list = []
        # Stores indexes of each item for supporting updates and deletion.
        self.pos_map: dict = {}
        # Stores current size of heap.
        self.size = 0
        # Stores function used to evaluate the score of an item on which basis ordering
        # will be done.
        self.key = key or (lambda x: x)

    def _parent(self, i: int) -> int | None:
        return int((i - 1) / 2) if i > 0 else None

    def _left(self, i: int) -> int | None:
        left = int(2 * i + 1)
        return left if 0 < left < self.size else None

    def _right(self, i: int) -> int | None:
        right = int(2 * i + 2)
        return right if 0 < right < self.size else None

    def _swap(self, i: int, j: int) -> None:
        # First update the indexes of the items in index map.
        self.pos_map[self.arr[i][0]], self.pos_map[self.arr[j][0]] = (
            self.pos_map[self.arr[j][0]],
            self.pos_map[self.arr[i][0]],
        )
        # Then swap the items in the list.
        self.arr[i], self.arr[j] = self.arr[j], self.arr[i]

    def _cmp(self, i: int, j: int) -> bool:
        return self.arr[i][1] < self.arr[j][1]

    def _get_valid_parent(self, i: int) -> int:
        left = self._left(i)
        right = self._right(i)
        valid_parent = i

        if left is not None and not self._cmp(left, valid_parent):
            valid_parent = left
        if right is not None and not self._cmp(right, valid_parent):
            valid_parent = right

        return valid_parent

    def _heapify_up(self, index: int) -> None:
        parent = self._parent(index)
        while parent is not None and not self._cmp(index, parent):
            self._swap(index, parent)
            index, parent = parent, self._parent(parent)

    def _heapify_down(self, index: int) -> None:
        valid_parent = self._get_valid_parent(index)
        while valid_parent != index:
            self._swap(index, valid_parent)
            index, valid_parent = valid_parent, self._get_valid_parent(valid_parent)

    def update_item(self, item: int, item_value: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        self.arr[index] = [item, self.key(item_value)]
        # Make sure heap is right in both up and down direction.
        # Ideally only one of them will make any change.
        self._heapify_up(index)
        self._heapify_down(index)

    def delete_item(self, item: int) -> None:
        if item not in self.pos_map:
            return
        index = self.pos_map[item]
        del self.pos_map[item]
        self.arr[index] = self.arr[self.size - 1]
        self.pos_map[self.arr[self.size - 1][0]] = index
        self.size -= 1
        # Make sure heap is right in both up and down direction. Ideally only one
        # of them will make any change- so no performance loss in calling both.
        if self.size > index:
            self._heapify_up(index)
            self._heapify_down(index)

    def insert_item(self, item: int, item_value: int) -> None:
        arr_len = len(self.arr)
        if arr_len == self.size:
            self.arr.append([item, self.key(item_value)])
        else:
            self.arr[self.size] = [item, self.key(item_value)]
        self.pos_map[item] = self.size
        self.size += 1
        self._heapify_up(self.size - 1)

    def get_top(self) -> tuple | None:
        return self.arr[0] if self.size else None

    def extract_top(self) -> tuple | None:
        top_item_tuple = self.get_top()
        if top_item_tuple:
            self.delete_item(top_item_tuple[0])
        return top_item_tuple
def test_heap() -> None:
    """Doctest placeholder for the Heap class."""
if __name__ == "__main__":
import doctest
doctest.testmod()
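

# Usage sketch (an illustrative addition, not part of the original module):
# items are ordered by key(item_value), and pos_map lets update_item and
# delete_item locate an item in O(1) before re-heapifying in O(log n).
def _demo_heap() -> None:
    heap = Heap()
    heap.insert_item(5, 34)
    heap.insert_item(6, 31)
    heap.insert_item(7, 37)
    assert heap.get_top() == [6, 31]  # smallest value wins in a min-heap
    heap.update_item(6, 40)           # pushing 6 down reorders the heap
    assert heap.extract_top() == [5, 34]


_demo_heap()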
'''simple docstring'''
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)
def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )
@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]
def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]
def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
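

# A minimal sketch of what the ``converters`` option above does (illustrative,
# using pandas directly; the Csv builder forwards such kwargs to
# ``pandas.read_csv``): each raw cell is passed through the converter before
# the column is built.
if __name__ == "__main__":
    import io

    import pandas as pd

    frame = pd.read_csv(
        io.StringIO("int_list\n1 2 3\n4 5 6\n"),
        converters={"int_list": lambda x: [int(i) for i in x.split()]},
    )
    assert frame["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]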
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
'''simple docstring'''
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def data_handling(data: dict) -> tuple:
    # Split dataset into features and target.
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
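

# A tiny illustration (not part of the original module) of the two error
# metrics printed above, on hand-checkable numbers:
# MAE = mean(|y - y_hat|), MSE = mean((y - y_hat) ** 2).
if __name__ == "__main__":
    _y_true = np.array([3.0, -0.5, 2.0])
    _y_pred = np.array([2.5, 0.0, 2.0])
    assert np.isclose(mean_absolute_error(_y_true, _y_pred), 1.0 / 3.0)
    assert np.isclose(mean_squared_error(_y_true, _y_pred), 0.5 / 3.0)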
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
UpperCAmelCase : Optional[Any] = "T5Config"
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
lowercase__ = "mt5"
lowercase__ = MTaConfig
import collections
import importlib.util
import os
import re
from pathlib import Path
PATH_TO_TRANSFORMERS = "src/transformers"
# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")
def find_backend(line):
    """Find one (or multiple) backend in a code line of the init."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)
def parse_init(init_file):
    """
    Read an init file and parse (per backend) the ``_import_structure`` objects defined and the ``TYPE_CHECKING``
    objects defined.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects
def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors
def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))
def get_transformers_submodules():
    """Returns the list of Transformers submodules."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules
IGNORE_SUBMODULES = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
]
def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )
if __name__ == "__main__":
check_all_inits()
check_submodules()
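

# Quick illustration (not part of the original script) of what find_backend
# extracts from init lines guarded by backend availability checks:
if __name__ == "__main__":
    assert find_backend("if not is_torch_available():") == "torch"
    assert find_backend("if not is_tokenizers_available():") == "tokenizers"
    assert find_backend("import os") is None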
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/spiece.model',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/spiece.model',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/spiece.model',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/spiece.model',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'albert-base-v1': 512,
'albert-large-v1': 512,
'albert-xlarge-v1': 512,
'albert-xxlarge-v1': 512,
'albert-base-v2': 512,
'albert-large-v2': 512,
'albert-xlarge-v2': 512,
'albert-xxlarge-v2': 512,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizer(PreTrainedTokenizer):
    """Construct an ALBERT tokenizer, based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(self, vocab_file, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = " ".join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace("``", '"').replace("''", '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize("NFKD", outputs)
            outputs = "".join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str) -> List[str]:
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(",") and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ""))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
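

# Usage sketch (an illustrative addition, not part of the original module):
# the tokenizer follows the standard slow-tokenizer API, so a round trip
# looks like this (requires downloading the pretrained vocab).
if __name__ == "__main__":
    tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
    tokens = tokenizer.tokenize("Hello, world!")
    ids = tokenizer.convert_tokens_to_ids(tokens)
    assert tokenizer.convert_tokens_to_string(tokens).startswith("hello")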
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)
def lowercase ( self : Any , snake_case_ : "Image" , snake_case_ : str ):
_UpperCAmelCase = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
_UpperCAmelCase = task_prompt.replace("{user_input}" , __lowerCamelCase )
_UpperCAmelCase = self.pre_processor.tokenizer(
__lowerCamelCase , add_special_tokens=__lowerCamelCase , return_tensors="pt" ).input_ids
_UpperCAmelCase = self.pre_processor(__lowerCamelCase , return_tensors="pt" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
from ...configuration_utils import PretrainedConfig
TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/tapas-base-finetuned-sqa''': (
'''https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wtq''': (
'''https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-wikisql-supervised''': (
'''https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json'''
),
'''google/tapas-base-finetuned-tabfact''': (
'''https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json'''
),
}
class TapasConfig(PretrainedConfig):
    """Configuration class to store the configuration of a TAPAS model."""

    model_type = "tapas"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_vit_mae": ["VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMAEConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_mae"] = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit_mae"] = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 112
|
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''LayoutLMv2ImageProcessor'''
    tokenizer_class = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ) -> None:
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
"You cannot provide bounding boxes "
"if you initialized the image processor with apply_ocr set to True." )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
"You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError("You cannot return overflowing tokens without returning the offsets mapping." )
# first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["image"] = images
return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        '''simple docstring'''
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        '''simple docstring'''
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ) -> List[str]:
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
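# A hedged usage sketch for LayoutXLMProcessor above; it assumes the public
# "microsoft/layoutxlm-base" checkpoint and a local "document.png", so treat
# it as illustrative rather than part of this module.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutXLMProcessor

    processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
    image = Image.open("document.png").convert("RGB")
    encoding = processor(image, return_tensors="pt")  # OCR runs inside the image processor
    print(encoding.keys())  # input_ids, bbox, attention_mask, image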
| 112
| 1
|
def solution(n: int = 100 ) -> int:
    '''simple docstring'''
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(f"{solution() = }")
| 30
|
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 - _cos) / 2
    b1 = 1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt


def make_highpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = (1 + _cos) / 2
    b1 = -1 - _cos
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt


def make_bandpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = _sin / 2
    b1 = 0
    b2 = -b0
    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_allpass(frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha
    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt


def make_peak(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_lowshelf(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_highshelf(frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha
    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2
    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
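# A hedged usage sketch for the biquad designers above: a 1 kHz low-pass at a
# 48 kHz sample rate applied to a 5 kHz tone. It assumes IIRFilter exposes a
# per-sample process() method, as in the companion audio_filters.iir_filter
# module.
if __name__ == "__main__":
    fs = 48000
    lowpass = make_lowpass(1000 , fs )
    tone = [sin(tau * 5000 * t / fs ) for t in range(200 )]
    filtered = [lowpass.process(sample ) for sample in tone]
    print(max(abs(s ) for s in filtered[100:] ) )  # well below the input amplitude of 1.0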
| 6
| 0
|
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("""To use the rich extension, install rich with `pip install rich`""")
| 359
|
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler
class ValueGuidedRLPipeline( DiffusionPipeline ):
    '''simple docstring'''
    def __init__( self , value_function , unet , scheduler , env , ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except: # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except: # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]
    def normalize( self , x_in , key ):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize( self , x_in , key ):
        return x_in * self.stds[key] + self.means[key]

    def to_torch( self , x_in ):
        if type(x_in ) is dict:
            return {k: self.to_torch(v ) for k, v in x_in.items()}
        elif torch.is_tensor(x_in ):
            return x_in.to(self.unet.device )
        return torch.tensor(x_in , device=self.unet.device )

    def reset_x0( self , x_in , cond , act_dim ):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in
    def run_diffusion( self , x , conditions , n_guide_steps , scale ):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps ):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,) , i , device=self.unet.device , dtype=torch.long )
            for _ in range(n_guide_steps ):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0 , 2 , 1 ) , timesteps ).sample
                    grad = torch.autograd.grad([y.sum()] , [x] )[0]
                    posterior_variance = self.scheduler._get_variance(i )
                    model_std = torch.exp(0.5 * posterior_variance )
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x , conditions , self.action_dim )
            prev_x = self.unet(x.permute(0 , 2 , 1 ) , timesteps ).sample.permute(0 , 2 , 1 )
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x , i , x , predict_epsilon=False )['''prev_sample''']
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x , conditions , self.action_dim )
            x = self.to_torch(x )
        return x, y
    def __call__( self , obs , batch_size=64 , planning_horizon=32 , n_guide_steps=2 , scale=0.1 ):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs , '''observations''' )
        obs = obs[None].repeat(batch_size , axis=0 )
        conditions = {0: self.to_torch(obs )}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape , device=self.unet.device )
        x = self.reset_x0(x1 , conditions , self.action_dim )
        x = self.to_torch(x )
        # run the diffusion process
        x, y = self.run_diffusion(x , conditions , n_guide_steps , scale )
        # sort output trajectories by value
        sorted_idx = y.argsort(0 , descending=True ).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions , key='''actions''' )
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0 , batch_size )
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
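# A standalone sketch of the per-key normalization the pipeline above relies
# on: observations are shifted and scaled by dataset statistics before the
# diffusion process, and actions are mapped back afterwards. numpy stands in
# for the real tensors, and the statistics below are made up.
import numpy as np

means = {"observations": np.array([0.5, -1.0]), "actions": np.array([0.0])}
stds = {"observations": np.array([2.0, 0.5]), "actions": np.array([3.0])}


def normalize(x_in, key):
    return (x_in - means[key]) / stds[key]


def de_normalize(x_in, key):
    return x_in * stds[key] + means[key]


obs = np.array([1.5, -0.5])
assert np.allclose(de_normalize(normalize(obs, "observations"), "observations"), obs)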
| 267
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_blip_2'] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 243
|
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int ) -> str:
    if not isinstance(precision , int ):
        raise TypeError('''Undefined for non-integers''' )
    elif precision < 1:
        raise ValueError('''Undefined for non-natural numbers''' )
    getcontext().prec = precision
    num_iterations = ceil(precision / 14 )
    constant_term = 426880 * Decimal(10005 ).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term )
    for k in range(1 , num_iterations ):
        multinomial_term = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
    return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
    n = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 8
| 0
|
"""simple docstring"""
DIGITS_SQUARED = [sum(int(c , 10 ) ** 2 for c in i.__str__() ) for i in range(100000 )]


def next_number(number: int ) -> int:
    '''simple docstring'''
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000
    return sum_of_digits_squared


# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10000000
CHAINS[0] = True  # the chain of 1
CHAINS[57] = False  # the chain of 58, which ends at 89


def chain(number: int ) -> bool:
    '''simple docstring'''
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain


def solution(number: int = 10000000 ) -> int:
    '''simple docstring'''
    for i in range(1 , number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
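# A hedged brute-force cross-check: walk each chain naively, digit by digit,
# and count how many starting numbers below 10000 arrive at 89. This should
# agree with the cached-array approach above for the same bound.
def _arrives_at_89(number: int ) -> bool:
    while number not in (1, 89):
        number = sum(int(d ) ** 2 for d in str(number ) )
    return number == 89


if __name__ == "__main__":
    print(sum(_arrives_at_89(i ) for i in range(1 , 10000 ) ) )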
| 356
|
"""simple docstring"""
from __future__ import annotations
import time
A : Union[str, Any] = list[tuple[int, int]]
A : int = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
A : int = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a , __a , __a , __a ):
__lowerCAmelCase = pos_x
__lowerCAmelCase = pos_y
__lowerCAmelCase = (pos_y, pos_x)
__lowerCAmelCase = goal_x
__lowerCAmelCase = goal_y
__lowerCAmelCase = parent
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a ):
__lowerCAmelCase = Node(start[1] , start[0] , goal[1] , goal[0] , __a )
__lowerCAmelCase = Node(goal[1] , goal[0] , goal[1] , goal[0] , __a )
__lowerCAmelCase = [self.start]
__lowerCAmelCase = False
def snake_case ( self ):
while self.node_queue:
__lowerCAmelCase = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase = True
return self.retrace_path(__a )
__lowerCAmelCase = self.get_successors(__a )
for node in successors:
self.node_queue.append(__a )
if not self.reached:
return [self.start.pos]
return None
def snake_case ( self , __a ):
__lowerCAmelCase = []
for action in delta:
__lowerCAmelCase = parent.pos_x + action[1]
__lowerCAmelCase = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__a ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__a , __a , self.target.pos_y , self.target.pos_x , __a ) )
return successors
def snake_case ( self , __a ):
__lowerCAmelCase = node
__lowerCAmelCase = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase = current_node.parent
path.reverse()
return path
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , __a , __a ):
__lowerCAmelCase = BreadthFirstSearch(__a , __a )
__lowerCAmelCase = BreadthFirstSearch(__a , __a )
__lowerCAmelCase = False
def snake_case ( self ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__lowerCAmelCase = self.fwd_bfs.node_queue.pop(0 )
__lowerCAmelCase = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
__lowerCAmelCase = True
return self.retrace_bidirectional_path(
__a , __a )
__lowerCAmelCase = current_bwd_node
__lowerCAmelCase = current_fwd_node
__lowerCAmelCase = {
self.fwd_bfs: self.fwd_bfs.get_successors(__a ),
self.bwd_bfs: self.bwd_bfs.get_successors(__a ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__a )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def snake_case ( self , __a , __a ):
__lowerCAmelCase = self.fwd_bfs.retrace_path(__a )
__lowerCAmelCase = self.bwd_bfs.retrace_path(__a )
bwd_path.pop()
bwd_path.reverse()
__lowerCAmelCase = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
A : List[Any] = (0, 0)
A : Union[str, Any] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
A : Any = time.time()
A : Dict = BreadthFirstSearch(init, goal)
A : Any = bfs.search()
A : List[str] = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
A : Optional[Any] = time.time()
A : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
A : Any = bd_bfs.search()
A : str = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 259
| 0
|
import requests
__snake_case = """https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="""
def fetch_bbc_news(bbc_news_api_key: str ) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
        print(F'''{i}.) {article["title"]}''' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="""<Your BBC News API key goes here>""")
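# A hedged variant of fetch_bbc_news() above that passes the key via query
# parameters instead of string concatenation; the endpoint and parameter names
# are taken from the URL hard-coded at the top of this script.
def fetch_bbc_news_params(bbc_news_api_key: str ) -> None:
    response = requests.get(
        "https://newsapi.org/v1/articles" ,
        params={"source": "bbc-news", "sortBy": "top", "apiKey": bbc_news_api_key} ,
        timeout=10 ,
    )
    for i, article in enumerate(response.json()["articles"] , 1 ):
        print(f"{i}.) {article['title']}" )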
| 259
|
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class LayoutLMv3Config( PretrainedConfig ):
    model_type = "layoutlmv3"
def __init__( self : Optional[Any] , _UpperCAmelCase : Dict=5_02_65 , _UpperCAmelCase : str=7_68 , _UpperCAmelCase : Union[str, Any]=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Optional[int]=30_72 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : int=0.1 , _UpperCAmelCase : Optional[int]=5_12 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Dict=0.02 , _UpperCAmelCase : Optional[int]=1e-5 , _UpperCAmelCase : str=1 , _UpperCAmelCase : Union[str, Any]=0 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : Dict=10_24 , _UpperCAmelCase : int=1_28 , _UpperCAmelCase : Dict=1_28 , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : List[Any]=1_28 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : List[Any]=2_56 , _UpperCAmelCase : int=True , _UpperCAmelCase : Union[str, Any]=True , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : Optional[int]=2_24 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[Any]=16 , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : List[str] , ) -> Dict:
"""simple docstring"""
super().__init__(
vocab_size=_UpperCAmelCase , hidden_size=_UpperCAmelCase , num_hidden_layers=_UpperCAmelCase , num_attention_heads=_UpperCAmelCase , intermediate_size=_UpperCAmelCase , hidden_act=_UpperCAmelCase , hidden_dropout_prob=_UpperCAmelCase , attention_probs_dropout_prob=_UpperCAmelCase , max_position_embeddings=_UpperCAmelCase , type_vocab_size=_UpperCAmelCase , initializer_range=_UpperCAmelCase , layer_norm_eps=_UpperCAmelCase , pad_token_id=_UpperCAmelCase , bos_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , **_UpperCAmelCase , )
        __lowercase = max_2d_position_embeddings
__lowercase = coordinate_size
__lowercase = shape_size
__lowercase = has_relative_attention_bias
__lowercase = rel_pos_bins
__lowercase = max_rel_pos
__lowercase = has_spatial_attention_bias
        __lowercase = rel_2d_pos_bins
        __lowercase = max_rel_2d_pos
__lowercase = text_embed
__lowercase = visual_embed
__lowercase = input_size
__lowercase = num_channels
__lowercase = patch_size
__lowercase = classifier_dropout
class LayoutLMv3OnnxConfig( OnnxConfig ):
    torch_onnx_minimum_version = version.parse("1.12" )
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
] )
    @property
    def atol_for_validation( self ) -> float:
        """simple docstring"""
        return 1e-5

    @property
    def default_onnx_opset( self ) -> int:
        """simple docstring"""
        return 12
def a__ ( self : str , _UpperCAmelCase : "ProcessorMixin" , _UpperCAmelCase : int = -1 , _UpperCAmelCase : int = -1 , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional["TensorType"] = None , _UpperCAmelCase : int = 3 , _UpperCAmelCase : int = 40 , _UpperCAmelCase : int = 40 , ) -> Mapping[str, Any]:
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , _UpperCAmelCase )
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowercase = processor.tokenizer.num_special_tokens_to_add(_UpperCAmelCase )
__lowercase = compute_effective_axis_dimension(
_UpperCAmelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_UpperCAmelCase )
# Generate dummy inputs according to compute batch and sequence
__lowercase = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size
# Generate dummy bounding boxes
__lowercase = [[[48, 84, 73, 1_28]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
__lowercase = self._generate_dummy_images(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
__lowercase = dict(
processor(
_UpperCAmelCase , text=_UpperCAmelCase , boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , ) )
return inputs
| 325
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_lilt'''] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 356
|
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10
def lin_search(left: int , right: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    for i in range(left , right ):
        if array[i] == target:
            return i
    return -1
def ite_ternary_search(array: list[int] , target: int ) -> int:
    """simple docstring"""
    left = 0
    right = len(array )
    while left <= right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1
def rec_ternary_search(left: int , right: int , array: list[int] , target: int ) -> int:
    """simple docstring"""
    if left < right:
        if right - left < precision:
            return lin_search(left , right , array , target )
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1
        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left , one_third - 1 , array , target )
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1 , right , array , target )
        else:
            return rec_ternary_search(one_third + 1 , two_third - 1 , array , target )
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('''Enter numbers separated by comma:\n''').strip()
    collection = [int(item.strip()) for item in user_input.split(''',''')]
    assert collection == sorted(collection), F"List must be ordered.\n{collection}."
    target = int(input('''Enter the number to be found in the list:\n''').strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(F"""Iterative search: {target} found at positions: {result1}""")
        print(F"""Recursive search: {target} found at positions: {result2}""")
else:
print('''Not found''')
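# Quick demo of both search variants above on a small sorted list: 5 sits at
# index 4, and a value that is absent returns -1.
_demo = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert ite_ternary_search(_demo, 5) == 4
assert rec_ternary_search(0, len(_demo) - 1, _demo, 5) == 4
assert ite_ternary_search(_demo, 42) == -1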
| 184
| 0
|
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list ) -> int:
    if not postfix_notation:
        return 0
    operations = {"""+""", """-""", """*""", """/"""}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b )
            elif token == "-":
                stack.append(a - b )
            elif token == "*":
                stack.append(a * b )
            else:
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1 )
                else:
                    stack.append(a // b )
        else:
            stack.append(int(token ) )
    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
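# Example: the postfix expression "2 3 + 4 *" evaluates to (2 + 3) * 4 = 20 on
# the stack machine above.
assert evaluate_postfix(["2", "3", "+", "4", "*"] ) == 20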
| 112
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def bytes_to_unicode():
    bs = (
        list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )


def get_pairs(word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class BartTokenizer( PreTrainedTokenizer ):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple="replace" , lowerCAmelCase__ : str="<s>" , lowerCAmelCase__ : Dict="</s>" , lowerCAmelCase__ : Union[str, Any]="</s>" , lowerCAmelCase__ : Any="<s>" , lowerCAmelCase__ : str="<unk>" , lowerCAmelCase__ : Tuple="<pad>" , lowerCAmelCase__ : Union[str, Any]="<mask>" , lowerCAmelCase__ : Dict=False , **lowerCAmelCase__ : Optional[int] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else bos_token
__SCREAMING_SNAKE_CASE : Optional[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else eos_token
__SCREAMING_SNAKE_CASE : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else sep_token
__SCREAMING_SNAKE_CASE : Tuple = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else cls_token
__SCREAMING_SNAKE_CASE : Dict = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else unk_token
__SCREAMING_SNAKE_CASE : List[Any] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE : int = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ , rstrip=lowerCAmelCase__ ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else mask_token
super().__init__(
errors=lowerCAmelCase__ , bos_token=lowerCAmelCase__ , eos_token=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , add_prefix_space=lowerCAmelCase__ , **lowerCAmelCase__ , )
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as vocab_handle:
__SCREAMING_SNAKE_CASE : Dict = json.load(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = {v: k for k, v in self.encoder.items()}
__SCREAMING_SNAKE_CASE : Dict = errors # how to handle errors in decoding
__SCREAMING_SNAKE_CASE : Optional[int] = bytes_to_unicode()
__SCREAMING_SNAKE_CASE : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCAmelCase__ , encoding="""utf-8""" ) as merges_handle:
__SCREAMING_SNAKE_CASE : Tuple = merges_handle.read().split("""\n""" )[1:-1]
__SCREAMING_SNAKE_CASE : Optional[int] = [tuple(merge.split() ) for merge in bpe_merges]
__SCREAMING_SNAKE_CASE : str = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) )
__SCREAMING_SNAKE_CASE : Optional[int] = {}
__SCREAMING_SNAKE_CASE : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__SCREAMING_SNAKE_CASE : Tuple = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def UpperCamelCase__ ( self : Optional[int] ):
"""simple docstring"""
return len(self.encoder )
def UpperCamelCase__ ( self : List[str] ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
if token in self.cache:
return self.cache[token]
__SCREAMING_SNAKE_CASE : Optional[int] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = get_pairs(lowerCAmelCase__ )
if not pairs:
return token
while True:
__SCREAMING_SNAKE_CASE : Optional[int] = min(lowerCAmelCase__ , key=lambda lowerCAmelCase__ : self.bpe_ranks.get(lowerCAmelCase__ , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = bigram
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : List[str] = 0
while i < len(lowerCAmelCase__ ):
try:
__SCREAMING_SNAKE_CASE : Any = word.index(lowerCAmelCase__ , lowerCAmelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__SCREAMING_SNAKE_CASE : Dict = j
if word[i] == first and i < len(lowerCAmelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__SCREAMING_SNAKE_CASE : Optional[int] = tuple(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = new_word
if len(lowerCAmelCase__ ) == 1:
break
else:
__SCREAMING_SNAKE_CASE : Tuple = get_pairs(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[Any] = """ """.join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = word
return word
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = []
for token in re.findall(self.pat , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCAmelCase__ ).split(""" """ ) )
return bpe_tokens
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
return self.encoder.get(lowerCAmelCase__ , self.encoder.get(self.unk_token ) )
def UpperCamelCase__ ( self : Any , lowerCAmelCase__ : Tuple ):
"""simple docstring"""
return self.decoder.get(lowerCAmelCase__ )
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = """""".join(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(lowerCAmelCase__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
__SCREAMING_SNAKE_CASE : str = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
lowerCAmelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase__ , ensure_ascii=lowerCAmelCase__ ) + """\n""" )
__SCREAMING_SNAKE_CASE : Optional[int] = 0
with open(lowerCAmelCase__ , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
__SCREAMING_SNAKE_CASE : Optional[Any] = token_index
writer.write(""" """.join(lowerCAmelCase__ ) + """\n""" )
index += 1
return vocab_file, merge_file
def UpperCamelCase__ ( self : Optional[int] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id]
__SCREAMING_SNAKE_CASE : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def UpperCamelCase__ ( self : List[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None , lowerCAmelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase__ , token_ids_a=lowerCAmelCase__ , already_has_special_tokens=lowerCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCAmelCase__ )) + [1]
return [1] + ([0] * len(lowerCAmelCase__ )) + [1, 1] + ([0] * len(lowerCAmelCase__ )) + [1]
def UpperCamelCase__ ( self : Optional[Any] , lowerCAmelCase__ : List[int] , lowerCAmelCase__ : Optional[List[int]] = None ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id]
__SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCamelCase__ ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict=False , **lowerCAmelCase__ : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCAmelCase__ ) > 0 and not text[0].isspace()):
__SCREAMING_SNAKE_CASE : List[str] = """ """ + text
return (text, kwargs)
| 112
| 1
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map , num_blocks ):
    blocks = list(range(0 , num_blocks ) )
    device_map_blocks = [item for sublist in list(device_map.values() ) for item in sublist]
    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i ) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i )
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]
    if len(duplicate_blocks ) != 0:
        raise ValueError(
            'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.'
            ' These attention blocks were specified more than once: ' + str(duplicate_blocks ) )
    if len(missing_blocks ) != 0:
        raise ValueError(
            'There are attention blocks for this model that are not specified in the device_map. Add these attention '
            'blocks to a device on the device_map: ' + str(missing_blocks ) )
    if len(extra_blocks ) != 0:
        raise ValueError(
            'The device_map contains more attention blocks than this model has. Remove these from the device_map:'
            + str(extra_blocks ) )


def get_device_map(n_layers , devices ):
    layers = list(range(n_layers ) )
    n_blocks = int(ceil(n_layers / len(devices ) ) )
    layers_list = [layers[i : i + n_blocks] for i in range(0 , n_layers , n_blocks )]
    return dict(zip(devices , layers_list ) )
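# Example: split 8 layers across 2 devices with get_device_map() above, then
# validate the result with assert_device_map().
if __name__ == "__main__":
    device_map = get_device_map(8 , [0, 1] )
    assert device_map == {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}
    assert_device_map(device_map , 8 )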
| 321
|
"""simple docstring"""
class DisjointSet:
    def __init__( self , set_counts ) -> None:
        '''simple docstring'''
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )

    def merge( self , src , dst ) -> bool:
        '''simple docstring'''
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True

    def get_parent( self , disj_set ) -> int:
        '''simple docstring'''
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )  # path compression
        return self.parents[disj_set]
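# Example: three sets of sizes [1, 1, 2]; merging elements 0 and 1 keeps
# max_set at 2, and merging that set with element 2 grows max_set to 4.
if __name__ == "__main__":
    ds = DisjointSet([1, 1, 2] )
    assert ds.merge(0 , 1 ) and ds.max_set == 2
    assert ds.merge(1 , 2 ) and ds.max_set == 4
    assert ds.merge(0 , 2 ) is False  # already in the same set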
| 321
| 1
|
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for _ in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
    return lrs
def unwrap_and_save_reload_schedule( scheduler , num_steps=10 ):
    """simple docstring"""
    lrs = []
    for step in range(num_steps ):
        lrs.append(scheduler.get_lr()[0] )
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname , 'schedule.bin' )
                torch.save(scheduler.state_dict() , file_name )
                state_dict = torch.load(file_name )
                scheduler.load_state_dict(state_dict )
    return lrs
@require_torch
class OptimizationTest( unittest.TestCase ):
    """simple docstring"""
    def assertListAlmostEqual( self , list1 , list2 , tol ) -> None:
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol )
    def test_adam_w( self ) -> None:
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w] , lr=2e-1 , weight_decay=0.0 )
        for _ in range(100 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
    def test_adafactor( self ) -> None:
        '''simple docstring'''
        w = torch.tensor([0.1, -0.2, -0.1] , requires_grad=True )
        target = torch.tensor([0.4, 0.2, -0.5] )
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w] , lr=1e-2 , eps=(1e-30, 1e-3) , clip_threshold=1.0 , decay_rate=-0.8 , beta1=None , weight_decay=0.0 , relative_step=False , scale_parameter=False , warmup_init=False , )
        for _ in range(1_000 ):
            loss = criterion(w , target )
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist() , [0.4, 0.2, -0.5] , tol=1e-2 )
@require_torch
class ScheduleInitTest( unittest.TestCase ):
    """simple docstring"""
    m = nn.Linear(50 , 50 ) if is_torch_available() else None
    optimizer = AdamW(m.parameters() , lr=10.0 ) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual( self , list1 , list2 , tol , msg=None ) -> None:
        '''simple docstring'''
        self.assertEqual(len(list1 ) , len(list2 ) )
        for a, b in zip(list1 , list2 ):
            self.assertAlmostEqual(a , b , delta=tol , msg=msg )
    def test_schedulers( self ) -> None:
        '''simple docstring'''
        common_kwargs = {'num_warmup_steps': 2, 'num_training_steps': 10}
        # schedulers doct format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{'num_warmup_steps': 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, 'num_cycles': 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, 'power': 2.0, 'lr_end': 1e-7},
[0.0, 5.0, 10.0, 7.6_56, 5.6_25, 3.9_06, 2.5, 1.4_06, 0.6_25, 0.1_56],
),
get_inverse_sqrt_schedule: (
{'num_warmup_steps': 2},
[0.0, 5.0, 10.0, 8.1_65, 7.0_71, 6.3_25, 5.7_74, 5.3_45, 5.0, 4.7_14],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data
            scheduler = scheduler_func(self.optimizer , **kwargs )
            self.assertEqual(len([scheduler.get_lr()[0]] ) , 1 )
            lrs_1 = unwrap_schedule(scheduler , self.num_steps )
            self.assertListAlmostEqual(
                lrs_1 , expected_learning_rates , tol=1e-2 , msg=f"""failed for {scheduler_func} in normal scheduler""" , )
            scheduler = scheduler_func(self.optimizer , **kwargs )
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler )  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler , self.num_steps )
            self.assertListEqual(lrs_1 , lrs_2 , msg=f"""failed for {scheduler_func} in save and reload""" )
class LambdaScheduleWrapper:
    """simple docstring"""
    def __init__( self , fn ) -> None:
        '''simple docstring'''
        self.fn = fn

    def __call__( self , *args , **kwargs ):
        '''simple docstring'''
        return self.fn(*args , **kwargs )

    @classmethod
    def wrap_scheduler( cls , scheduler ):
        '''simple docstring'''
        scheduler.lr_lambdas = list(map(cls , scheduler.lr_lambdas ) )
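# A standalone sketch of the warmup-then-linear-decay shape these tests check:
# the learning rate ramps from 0 to the peak over num_warmup_steps, then decays
# linearly to 0 at num_training_steps. The values reproduce the expected list
# for get_linear_schedule_with_warmup above.
def linear_schedule_lr(step, num_warmup_steps=2, num_training_steps=10, peak_lr=10.0):
    if step < num_warmup_steps:
        return peak_lr * step / num_warmup_steps
    return peak_lr * max(0.0, (num_training_steps - step) / (num_training_steps - num_warmup_steps))


assert [round(linear_schedule_lr(s), 2) for s in range(10)] == [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]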
| 90
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
UpperCAmelCase : Optional[Any] = None
try:
import msvcrt
except ImportError:
UpperCAmelCase : List[Any] = None
try:
import fcntl
except ImportError:
UpperCAmelCase : int = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
UpperCAmelCase : Union[str, Any] = OSError
# Data
# ------------------------------------------------
UpperCAmelCase : List[Any] = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
UpperCAmelCase : Tuple = '3.0.12'
UpperCAmelCase : str = None
def a__ ( ):
"""simple docstring"""
global _logger
__SCREAMING_SNAKE_CASE = _logger or logging.getLogger(__name__ )
return _logger
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = lock_file
return None
def __str__( self : str ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = f'The file lock \'{self.lock_file}\' could not be acquired.'
return temp
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = lock
return None
def __enter__( self : List[str] ) -> List[Any]:
"""simple docstring"""
return self.lock
def __exit__( self : Tuple , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] ) -> List[str]:
"""simple docstring"""
self.lock.release()
return None
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str=-1 , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = max_filename_length if max_filename_length is not None else 255
# Hash the filename if it's too long
__SCREAMING_SNAKE_CASE = self.hash_filename_if_too_long(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# The path to the lock file.
__SCREAMING_SNAKE_CASE = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
__SCREAMING_SNAKE_CASE = None
# The default timeout value.
__SCREAMING_SNAKE_CASE = timeout
# We use this lock primarily for the lock counter.
__SCREAMING_SNAKE_CASE = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
__SCREAMING_SNAKE_CASE = 0
return None
@property
def UpperCAmelCase__ ( self : List[Any] ) -> List[str]:
"""simple docstring"""
return self._lock_file
@property
def UpperCAmelCase__ ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
return self._timeout
@timeout.setter
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = float(__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
raise NotImplementedError()
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
raise NotImplementedError()
@property
def UpperCAmelCase__ ( self : Union[str, Any] ) -> int:
"""simple docstring"""
return self._lock_file_fd is not None
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=0.05 ) -> Optional[Any]:
"""simple docstring"""
if timeout is None:
__SCREAMING_SNAKE_CASE = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
__SCREAMING_SNAKE_CASE = id(self )
__SCREAMING_SNAKE_CASE = self._lock_file
__SCREAMING_SNAKE_CASE = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'Attempting to acquire lock {lock_id} on {lock_filename}' )
self._acquire()
if self.is_locked:
logger().debug(f'Lock {lock_id} acquired on {lock_filename}' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'Timeout on acquiring lock {lock_id} on {lock_filename}' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...' )
time.sleep(__SCREAMING_SNAKE_CASE )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
__SCREAMING_SNAKE_CASE = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def UpperCAmelCase__ ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=False ) -> Dict:
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
__SCREAMING_SNAKE_CASE = id(self )
__SCREAMING_SNAKE_CASE = self._lock_file
logger().debug(f'Attempting to release lock {lock_id} on {lock_filename}' )
self._release()
__SCREAMING_SNAKE_CASE = 0
logger().debug(f'Lock {lock_id} released on {lock_filename}' )
return None
def __enter__( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
self.acquire()
return self
def __exit__( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any ) -> Tuple:
"""simple docstring"""
self.release()
return None
def __del__( self : str ) -> Union[str, Any]:
"""simple docstring"""
self.release(force=__SCREAMING_SNAKE_CASE )
return None
def UpperCAmelCase__ ( self : Optional[int] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.path.basename(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > max_length and max_length > 0:
__SCREAMING_SNAKE_CASE = os.path.dirname(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = str(hash(__SCREAMING_SNAKE_CASE ) )
__SCREAMING_SNAKE_CASE = filename[: max_length - len(__SCREAMING_SNAKE_CASE ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
return path
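# Worked example (illustrative, not part of the original module): with
# max_length = 255, a 300-character basename keeps its first
# 255 - len(hashed_filename) - 8 characters; since "..." (3 chars) and
# ".lock" (5 chars) account for exactly the 8 subtracted characters, the
# rebuilt name is exactly max_length characters long.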
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict=-1 , __SCREAMING_SNAKE_CASE : Dict=None ) -> List[Any]:
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , max_filename_length=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
try:
msvcrt.locking(__SCREAMING_SNAKE_CASE , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = fd
return None
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._lock_file_fd
__SCREAMING_SNAKE_CASE = None
msvcrt.locking(__SCREAMING_SNAKE_CASE , msvcrt.LK_UNLCK , 1 )
os.close(__SCREAMING_SNAKE_CASE )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=-1 , __SCREAMING_SNAKE_CASE : Optional[Any]=None ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.statvfs(os.path.dirname(__SCREAMING_SNAKE_CASE ) ).f_namemax
super().__init__(__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , max_filename_length=__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.O_RDWR | os.O_CREAT | os.O_TRUNC
__SCREAMING_SNAKE_CASE = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
try:
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = fd
return None
def UpperCAmelCase__ ( self : List[Any] ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self._lock_file_fd
__SCREAMING_SNAKE_CASE = None
fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN )
os.close(__SCREAMING_SNAKE_CASE )
return None
class lowerCAmelCase__ ( a ):
"""simple docstring"""
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
__SCREAMING_SNAKE_CASE = os.open(self._lock_file , __SCREAMING_SNAKE_CASE )
except OSError:
pass
else:
__SCREAMING_SNAKE_CASE = fd
return None
def UpperCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
os.close(self._lock_file_fd )
__SCREAMING_SNAKE_CASE = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
UpperCAmelCase : Dict = None
if msvcrt:
UpperCAmelCase : Optional[int] = WindowsFileLock
elif fcntl:
UpperCAmelCase : Optional[Any] = UnixFileLock
else:
UpperCAmelCase : int = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
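# Usage sketch (illustrative, not part of the original module). The class
# bound to `UpperCAmelCase` above is the platform-appropriate lock; the lock
# path below is hypothetical.
#
#     lock = UpperCAmelCase('/tmp/example.txt.lock', timeout=5)
#     with lock.acquire(poll_intervall=0.1):
#         ...  # critical section: at most one process holds the lock here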
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowercase :
'''simple docstring'''
def __init__(self ,_lowerCamelCase ,_lowerCamelCase=13 ,_lowerCamelCase=30 ,_lowerCamelCase=2 ,_lowerCamelCase=3 ,_lowerCamelCase=True ,_lowerCamelCase=True ,_lowerCamelCase=32 ,_lowerCamelCase=5 ,_lowerCamelCase=4 ,_lowerCamelCase=37 ,_lowerCamelCase="gelu" ,_lowerCamelCase=0.1 ,_lowerCamelCase=0.1 ,_lowerCamelCase=10 ,_lowerCamelCase=0.0_2 ,_lowerCamelCase=None ,) -> Dict:
'''simple docstring'''
__lowercase = parent
__lowercase = batch_size
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = is_training
__lowercase = use_labels
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = type_sequence_label_size
__lowercase = initializer_range
__lowercase = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__lowercase = (image_size // patch_size) ** 2
__lowercase = num_patches + 1
def _UpperCAmelCase (self ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowercase = None
if self.use_labels:
__lowercase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__lowercase = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return ViTMSNConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = ViTMSNModel(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Tuple:
'''simple docstring'''
__lowercase = self.type_sequence_label_size
__lowercase = ViTMSNForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = model(_lowerCamelCase ,labels=_lowerCamelCase )
        print(f'''Pixel and labels shape: {pixel_values.shape}, {labels.shape}''' )
        print(f'''Labels: {labels}''' )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__lowercase = 1
__lowercase = ViTMSNForImageClassification(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
__lowercase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__lowercase = model(_lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = self.prepare_config_and_inputs()
__lowercase , __lowercase , __lowercase = config_and_inputs
__lowercase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
a : Dict = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
a : Dict = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
a : Optional[int] = False
a : str = False
a : Optional[Any] = False
a : List[Any] = False
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase = ViTMSNModelTester(self )
__lowercase = ConfigTester(self ,config_class=_lowerCamelCase ,has_text_modality=_lowerCamelCase ,hidden_size=37 )
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMSN does not use inputs_embeds''' )
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
pass
def _UpperCAmelCase (self ) -> Any:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__lowercase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCamelCase ,nn.Linear ) )
def _UpperCAmelCase (self ) -> Dict:
'''simple docstring'''
__lowercase , __lowercase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowercase = model_class(_lowerCamelCase )
__lowercase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowercase = [*signature.parameters.keys()]
__lowercase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_lowerCamelCase )
def _UpperCAmelCase (self ) -> List[Any]:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCamelCase )
def _UpperCAmelCase (self ) -> Tuple:
'''simple docstring'''
__lowercase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCamelCase )
@slow
def _UpperCAmelCase (self ) -> Optional[int]:
'''simple docstring'''
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase = ViTMSNModel.from_pretrained(_lowerCamelCase )
self.assertIsNotNone(_lowerCamelCase )
def _lowerCAmelCase ( ):
__lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _UpperCAmelCase (self ) -> Optional[Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained('''facebook/vit-msn-small''' ) if is_vision_available() else None
@slow
def _UpperCAmelCase (self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(2 )
__lowercase = ViTMSNForImageClassification.from_pretrained('''facebook/vit-msn-small''' ).to(_lowerCamelCase )
__lowercase = self.default_image_processor
__lowercase = prepare_img()
__lowercase = image_processor(images=_lowerCamelCase ,return_tensors='''pt''' ).to(_lowerCamelCase )
# forward pass
with torch.no_grad():
__lowercase = model(**_lowerCamelCase )
# verify the logits
__lowercase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_lowerCamelCase )
__lowercase = torch.tensor([-0.0_8_0_3, -0.4_4_5_4, -0.2_3_7_5] ).to(_lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_lowerCamelCase ,atol=1E-4 ) )
'''simple docstring'''
def _lowerCAmelCase ( lowerCamelCase_ : int = 6_0_0_8_5_1_4_7_5_1_4_3 ):
try:
__lowercase = int(lowerCamelCase_ )
except (TypeError, ValueError):
raise TypeError('''Parameter n must be int or castable to int.''' )
if n <= 0:
raise ValueError('''Parameter n must be greater than or equal to one.''' )
__lowercase = 1
__lowercase = 2
while i * i <= n:
while n % i == 0:
__lowercase = i
n //= i
i += 1
if n > 1:
__lowercase = n
return int(lowerCamelCase_ )
if __name__ == "__main__":
print(f'''{solution() = }''')
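# Worked example (illustrative): for n = 13195 the trial-division loop strips
# the factors 5, 7 and 13 and leaves n = 29, so the function returns 29, the
# largest prime factor (13195 = 5 * 7 * 13 * 29).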
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class UpperCAmelCase_ ( lowercase ):
"""simple docstring"""
UpperCamelCase_ : Dict ='Salesforce/blip-image-captioning-base'
UpperCamelCase_ : List[str] =(
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
UpperCamelCase_ : str ='image_captioner'
UpperCamelCase_ : str =AutoModelForVisionaSeq
UpperCamelCase_ : Optional[int] =['image']
UpperCamelCase_ : List[str] =['text']
def __init__( self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[str]:
requires_backends(self , ['''vision'''] )
super().__init__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return self.pre_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
return self.model.generate(**SCREAMING_SNAKE_CASE_ )
def UpperCAmelCase ( self , SCREAMING_SNAKE_CASE_ ) -> Tuple:
return self.pre_processor.batch_decode(SCREAMING_SNAKE_CASE_ , skip_special_tokens=SCREAMING_SNAKE_CASE_ )[0].strip()
from __future__ import annotations
from typing import Any
def _A ( SCREAMING_SNAKE_CASE__ : list[Any] ):
create_state_space_tree(SCREAMING_SNAKE_CASE__ , [] , 0 )
def _A ( SCREAMING_SNAKE_CASE__ : list[Any] , SCREAMING_SNAKE_CASE__ : list[Any] , SCREAMING_SNAKE_CASE__ : int ):
if index == len(SCREAMING_SNAKE_CASE__ ):
print(SCREAMING_SNAKE_CASE__ )
return
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 )
current_subsequence.append(sequence[index] )
create_state_space_tree(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , index + 1 )
current_subsequence.pop()
if __name__ == "__main__":
__snake_case = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(["""A""", """B""", """C"""])
generate_all_subsequences(seq)
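# Note (illustrative): each element is either skipped or appended before the
# recursive call, so the state-space tree enumerates all 2**n subsequences.
# The first call above therefore prints 2**4 = 16 subsequences and the second
# 2**3 = 8, each set including the empty list.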
import os
def __UpperCamelCase ( lowerCAmelCase__ : str = "input.txt" ):
with open(os.path.join(os.path.dirname(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) as input_file:
__a : Any = [
[int(lowerCAmelCase__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
__a : List[str] = len(lowerCAmelCase__ )
__a : Optional[Any] = len(matrix[0] )
__a : Dict = [[-1 for _ in range(lowerCAmelCase__ )] for _ in range(lowerCAmelCase__ )]
for i in range(lowerCAmelCase__ ):
__a : List[Any] = matrix[i][0]
for j in range(1 , lowerCAmelCase__ ):
for i in range(lowerCAmelCase__ ):
__a : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , lowerCAmelCase__ ):
__a : Tuple = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
__a : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(F"""{solution() = }""")
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
lowercase__ ='bert-base-cased'
lowercase__ ='google/pegasus-xsum'
lowercase__ =[' Sam ate lunch today.', 'Sams lunch ingredients.']
lowercase__ =['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
lowercase__ ='patrickvonplaten/t5-tiny-random'
lowercase__ ='sshleifer/bart-tiny-random'
lowercase__ ='sshleifer/tiny-mbart'
lowercase__ ='sshleifer/tiny-marian-en-de'
def __UpperCamelCase ( lowerCAmelCase__ : Path , lowerCAmelCase__ : list ):
__a : List[Any] = '''\n'''.join(lowerCAmelCase__ )
Path(lowerCAmelCase__ ).open('''w''' ).writelines(lowerCAmelCase__ )
def __UpperCamelCase ( lowerCAmelCase__ : int ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.source" ) , lowerCAmelCase__ )
_dump_articles(os.path.join(lowerCAmelCase__ , f"{split}.target" ) , lowerCAmelCase__ )
return tmp_dir
class UpperCamelCase__ ( __lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCAmelCase (self : int , snake_case_ : int ):
__a : Optional[Any] = AutoTokenizer.from_pretrained(snake_case_ )
__a : Optional[Any] = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a : Union[str, Any] = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
__a : str = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
__a : str = 4
__a : Dict = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__a , __a : Any = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
__a : List[Any] = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , src_lang=snake_case_ , tgt_lang=snake_case_ , )
__a : Dict = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(snake_case_ , snake_case_ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__a : Dict = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : str ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ )
__a : str = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__a : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in ARTICLES )
__a : Any = max(len(tokenizer.encode(snake_case_ ) ) for a in SUMMARIES )
__a : Dict = 4
__a : Optional[int] = LegacySeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=2_0 , max_target_length=snake_case_ , )
__a : Optional[Any] = DataLoader(snake_case_ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCAmelCase (self : List[str] ):
__a : int = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
__a : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__a : Optional[int] = tmp_dir.joinpath('''train.source''' ).open().readlines()
__a : List[Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(snake_case_ , snake_case_ , 1_2_8 , snake_case_ )
__a : Optional[Any] = {x.name for x in tmp_dir.iterdir()}
__a : Union[str, Any] = {x.name for x in save_dir.iterdir()}
__a : str = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(snake_case_ ) < len(snake_case_ )
assert len(snake_case_ ) == 1
assert len(packed_examples[0] ) == sum(len(snake_case_ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def lowerCAmelCase (self : Any ):
if not FAIRSEQ_AVAILABLE:
return
__a , __a , __a : Any = self._get_dataset(max_len=6_4 )
__a : int = 6_4
__a : List[str] = ds.make_dynamic_sampler(snake_case_ , required_batch_size_multiple=snake_case_ )
__a : List[str] = [len(snake_case_ ) for x in batch_sampler]
assert len(set(snake_case_ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(snake_case_ ) == len(snake_case_ ) # no dropped or added examples
__a : Union[str, Any] = DataLoader(snake_case_ , batch_sampler=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
__a : Tuple = []
__a : Union[str, Any] = []
for batch in data_loader:
__a : Any = batch['''input_ids'''].shape
__a : str = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__a : Optional[Any] = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(snake_case_ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(snake_case_ )
assert num_src_per_batch[0] == max(snake_case_ )
if failures:
raise AssertionError(f"too many tokens in {len(snake_case_ )} batches" )
def lowerCAmelCase (self : int ):
__a , __a , __a : Optional[int] = self._get_dataset(max_len=5_1_2 )
__a : Union[str, Any] = 2
__a : str = ds.make_sortish_sampler(snake_case_ , shuffle=snake_case_ )
__a : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 )
__a : Tuple = DataLoader(snake_case_ , batch_size=snake_case_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=snake_case_ )
__a : Optional[int] = tokenizer.pad_token_id
def count_pad_tokens(snake_case_ : Union[str, Any] , snake_case_ : List[str]="input_ids" ):
return [batch[k].eq(snake_case_ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(snake_case_ , k='''labels''' ) ) < sum(count_pad_tokens(snake_case_ , k='''labels''' ) )
assert sum(count_pad_tokens(snake_case_ ) ) < sum(count_pad_tokens(snake_case_ ) )
assert len(snake_case_ ) == len(snake_case_ )
def lowerCAmelCase (self : int , snake_case_ : int=1_0_0_0 , snake_case_ : Optional[Any]=1_2_8 ):
if os.getenv('''USE_REAL_DATA''' , snake_case_ ):
__a : Optional[int] = '''examples/seq2seq/wmt_en_ro'''
__a : List[Any] = max_len * 2 * 6_4
if not Path(snake_case_ ).joinpath('''train.len''' ).exists():
save_len_file(snake_case_ , snake_case_ )
else:
__a : int = '''examples/seq2seq/test_data/wmt_en_ro'''
__a : List[str] = max_len * 4
save_len_file(snake_case_ , snake_case_ )
__a : str = AutoTokenizer.from_pretrained(snake_case_ )
__a : Optional[int] = SeqaSeqDataset(
snake_case_ , data_dir=snake_case_ , type_path='''train''' , max_source_length=snake_case_ , max_target_length=snake_case_ , n_obs=snake_case_ , )
return ds, max_tokens, tokenizer
def lowerCAmelCase (self : List[str] ):
__a , __a , __a : str = self._get_dataset()
__a : Optional[Any] = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=snake_case_ ) )
__a : Tuple = set(DistributedSortishSampler(snake_case_ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=snake_case_ ) )
assert idsa.intersection(snake_case_ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCAmelCase (self : str , snake_case_ : Union[str, Any] ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained(snake_case_ , use_fast=snake_case_ )
if tok_name == MBART_TINY:
__a : Any = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
__a : Tuple = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__a : Optional[Any] = SeqaSeqDataset(
snake_case_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
__a : List[Any] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(snake_case_ ) == 1 if tok_name == BART_TINY else len(snake_case_ ) == 0
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : int , a__ : int , a__ : int , a__ : int , a__ : Dict=0.0 , a__ : Optional[int] = None , a__ : str = "geglu" , a__ : Optional[int] = None , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = False , a__ : bool = True , a__ : str = "layer_norm" , a__ : bool = False , ):
"""simple docstring"""
super().__init__()
__snake_case = only_cross_attention
__snake_case = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero'''
__snake_case = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm'''
if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
raise ValueError(
f"""`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"""
f""" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.""" )
# Define 3 blocks. Each block has its own normalization layer.
# 1. Self-Attn
if self.use_ada_layer_norm:
__snake_case = AdaLayerNorm(a__ , a__ )
elif self.use_ada_layer_norm_zero:
__snake_case = AdaLayerNormZero(a__ , a__ )
else:
__snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ )
__snake_case = Attention(
query_dim=a__ , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a__ , )
# 2. Cross-Attn
if cross_attention_dim is not None or double_self_attention:
# We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
# I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
# the second cross attention block.
__snake_case = (
AdaLayerNorm(a__ , a__ )
if self.use_ada_layer_norm
else nn.LayerNorm(a__ , elementwise_affine=a__ )
)
__snake_case = Attention(
query_dim=a__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , upcast_attention=a__ , ) # is self-attn if encoder_hidden_states is none
else:
__snake_case = None
__snake_case = None
# 3. Feed-forward
__snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ )
__snake_case = FeedForward(a__ , dropout=a__ , activation_fn=a__ , final_dropout=a__ )
# let chunk size default to None
__snake_case = None
__snake_case = 0
def a (self : int , a__ : Optional[int] , a__ : int ):
"""simple docstring"""
__snake_case = chunk_size
__snake_case = dim
def a (self : Dict , a__ : torch.FloatTensor , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.FloatTensor] = None , a__ : Optional[torch.LongTensor] = None , a__ : Dict[str, Any] = None , a__ : Optional[torch.LongTensor] = None , ):
"""simple docstring"""
if self.use_ada_layer_norm:
__snake_case = self.norma(a__ , a__ )
elif self.use_ada_layer_norm_zero:
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = self.norma(
a__ , a__ , a__ , hidden_dtype=hidden_states.dtype )
else:
__snake_case = self.norma(a__ )
__snake_case = cross_attention_kwargs if cross_attention_kwargs is not None else {}
__snake_case = self.attna(
a__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a__ , **a__ , )
if self.use_ada_layer_norm_zero:
__snake_case = gate_msa.unsqueeze(1 ) * attn_output
__snake_case = attn_output + hidden_states
# 2. Cross-Attention
if self.attna is not None:
__snake_case = (
self.norma(a__ , a__ ) if self.use_ada_layer_norm else self.norma(a__ )
)
__snake_case = self.attna(
a__ , encoder_hidden_states=a__ , attention_mask=a__ , **a__ , )
__snake_case = attn_output + hidden_states
# 3. Feed-forward
__snake_case = self.norma(a__ )
if self.use_ada_layer_norm_zero:
__snake_case = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
if self._chunk_size is not None:
# "feed_forward_chunk_size" can be used to save memory
if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
raise ValueError(
f"""`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.""" )
__snake_case = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
__snake_case = torch.cat(
[self.ff(a__ ) for hid_slice in norm_hidden_states.chunk(a__ , dim=self._chunk_dim )] , dim=self._chunk_dim , )
else:
__snake_case = self.ff(a__ )
if self.use_ada_layer_norm_zero:
__snake_case = gate_mlp.unsqueeze(1 ) * ff_output
__snake_case = ff_output + hidden_states
return hidden_states
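# Note (illustrative): the forward pass above applies the three sub-blocks in
# order -- self-attention, optional cross-attention, feed-forward -- each with
# a residual connection; chunking the feed-forward along `_chunk_dim` trades
# peak memory for extra sequential kernel launches.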
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Any , a__ : int , a__ : Optional[int] = None , a__ : int = 4 , a__ : float = 0.0 , a__ : str = "geglu" , a__ : bool = False , ):
"""simple docstring"""
super().__init__()
__snake_case = int(dim * mult )
__snake_case = dim_out if dim_out is not None else dim
if activation_fn == "gelu":
__snake_case = GELU(a__ , a__ )
if activation_fn == "gelu-approximate":
__snake_case = GELU(a__ , a__ , approximate='''tanh''' )
elif activation_fn == "geglu":
__snake_case = GEGLU(a__ , a__ )
elif activation_fn == "geglu-approximate":
__snake_case = ApproximateGELU(a__ , a__ )
__snake_case = nn.ModuleList([] )
# project in
self.net.append(a__ )
# project dropout
self.net.append(nn.Dropout(a__ ) )
# project out
self.net.append(nn.Linear(a__ , a__ ) )
# FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
if final_dropout:
self.net.append(nn.Dropout(a__ ) )
def a (self : Union[str, Any] , a__ : Optional[int] ):
"""simple docstring"""
for module in self.net:
__snake_case = module(a__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Tuple , a__ : int , a__ : int , a__ : str = "none" ):
"""simple docstring"""
super().__init__()
__snake_case = nn.Linear(a__ , a__ )
__snake_case = approximate
def a (self : Any , a__ : str ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(a__ , approximate=self.approximate )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) , approximate=self.approximate ).to(dtype=gate.dtype )
def a (self : Any , a__ : List[str] ):
"""simple docstring"""
__snake_case = self.proj(a__ )
__snake_case = self.gelu(a__ )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Optional[int] , a__ : int , a__ : int ):
"""simple docstring"""
super().__init__()
__snake_case = nn.Linear(a__ , dim_out * 2 )
def a (self : Dict , a__ : Optional[int] ):
"""simple docstring"""
if gate.device.type != "mps":
return F.gelu(a__ )
# mps: gelu is not implemented for float16
return F.gelu(gate.to(dtype=torch.floataa ) ).to(dtype=gate.dtype )
def a (self : str , a__ : List[Any] ):
"""simple docstring"""
__snake_case , __snake_case = self.proj(a__ ).chunk(2 , dim=-1 )
return hidden_states * self.gelu(a__ )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Dict , a__ : int , a__ : int ):
"""simple docstring"""
super().__init__()
__snake_case = nn.Linear(a__ , a__ )
def a (self : str , a__ : Tuple ):
"""simple docstring"""
__snake_case = self.proj(a__ )
return x * torch.sigmoid(1.7_0_2 * x )
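# Note (illustrative): this is the sigmoid approximation of GELU,
# gelu(x) ~= x * sigmoid(1.702 * x), which avoids evaluating erf.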
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Dict , a__ : Optional[Any] , a__ : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__snake_case = nn.Embedding(a__ , a__ )
__snake_case = nn.SiLU()
__snake_case = nn.Linear(a__ , embedding_dim * 2 )
__snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ )
def a (self : Any , a__ : Optional[int] , a__ : Union[str, Any] ):
"""simple docstring"""
__snake_case = self.linear(self.silu(self.emb(a__ ) ) )
__snake_case , __snake_case = torch.chunk(a__ , 2 )
__snake_case = self.norm(a__ ) * (1 + scale) + shift
return x
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Dict , a__ : Dict , a__ : str ):
"""simple docstring"""
super().__init__()
__snake_case = CombinedTimestepLabelEmbeddings(a__ , a__ )
__snake_case = nn.SiLU()
__snake_case = nn.Linear(a__ , 6 * embedding_dim , bias=a__ )
__snake_case = nn.LayerNorm(a__ , elementwise_affine=a__ , eps=1E-6 )
def a (self : Optional[Any] , a__ : List[Any] , a__ : Union[str, Any] , a__ : Optional[Any] , a__ : Union[str, Any]=None ):
"""simple docstring"""
__snake_case = self.linear(self.silu(self.emb(a__ , a__ , hidden_dtype=a__ ) ) )
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case = emb.chunk(6 , dim=1 )
__snake_case = self.norm(a__ ) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
def __init__(self : Optional[int] , a__ : int , a__ : int , a__ : int , a__ : Optional[str] = None , a__ : float = 1E-5 ):
"""simple docstring"""
super().__init__()
__snake_case = num_groups
__snake_case = eps
if act_fn is None:
__snake_case = None
else:
__snake_case = get_activation(a__ )
__snake_case = nn.Linear(a__ , out_dim * 2 )
def a (self : List[str] , a__ : Dict , a__ : List[str] ):
"""simple docstring"""
if self.act:
__snake_case = self.act(a__ )
__snake_case = self.linear(a__ )
__snake_case = emb[:, :, None, None]
__snake_case , __snake_case = emb.chunk(2 , dim=1 )
__snake_case = F.group_norm(a__ , self.num_groups , eps=self.eps )
__snake_case = x * (1 + scale) + shift
return x
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""adapter_layer""": """encoder.layers.*.adapter_layer""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
"""pooling_layer.linear""": """projector""",
"""pooling_layer.projection""": """classifier""",
}
_UpperCamelCase = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""projector""",
"""classifier""",
]
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
with open(__lowerCamelCase , """r""" ) as file:
for line_number, line in enumerate(__lowerCamelCase ):
UpperCAmelCase = line.strip()
if line:
UpperCAmelCase = line.split()
UpperCAmelCase = line_number
UpperCAmelCase = words[0]
UpperCAmelCase = value
return result
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
for attribute in key.split(""".""" ):
UpperCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
UpperCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCAmelCase = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase ).shape
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = hf_pointer
for attribute in hf_param_name.split(""".""" ):
UpperCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase = shape_pointer.shape
# let's reduce dimension
UpperCAmelCase = value[0]
else:
UpperCAmelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase = value
elif weight_type == "weight_g":
UpperCAmelCase = value
elif weight_type == "weight_v":
UpperCAmelCase = value
elif weight_type == "bias":
UpperCAmelCase = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
UpperCAmelCase = getattr(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase = value
else:
UpperCAmelCase = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__lowerCamelCase ):
UpperCAmelCase = PARAM_MAPPING[full_name.split(""".""" )[-1]]
UpperCAmelCase = "param"
if weight_type is not None and weight_type != "param":
UpperCAmelCase = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
UpperCAmelCase = ".".join([key, hf_param_name] )
else:
UpperCAmelCase = key
UpperCAmelCase = value if "lm_head" in full_key else value[0]
_UpperCamelCase = {
"""W_a""": """linear_1.weight""",
"""W_b""": """linear_2.weight""",
"""b_a""": """linear_1.bias""",
"""b_b""": """linear_2.bias""",
"""ln_W""": """norm.weight""",
"""ln_b""": """norm.bias""",
}
def _a ( _snake_case , _snake_case , _snake_case=None , _snake_case=None ):
"""simple docstring"""
UpperCAmelCase = False
for key, mapped_key in MAPPING.items():
UpperCAmelCase = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
UpperCAmelCase = True
if "*" in mapped_key:
UpperCAmelCase = name.split(__lowerCamelCase )[0].split(""".""" )[-2]
UpperCAmelCase = mapped_key.replace("""*""" , __lowerCamelCase )
if "weight_g" in name:
UpperCAmelCase = "weight_g"
elif "weight_v" in name:
UpperCAmelCase = "weight_v"
elif "bias" in name:
UpperCAmelCase = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase = "weight"
else:
UpperCAmelCase = None
if hf_dict is not None:
rename_dict(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
else:
set_recursively(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
    return is_used
def _a ( _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = []
UpperCAmelCase = fairseq_model.state_dict()
UpperCAmelCase = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
UpperCAmelCase = True
else:
UpperCAmelCase = load_wavaveca_layer(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if not is_used:
unused_weights.append(__lowerCamelCase )
logger.warning(F'''Unused weights: {unused_weights}''' )
def _a ( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = full_name.split("""conv_layers.""" )[-1]
UpperCAmelCase = name.split(""".""" )
UpperCAmelCase = int(items[0] )
UpperCAmelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCAmelCase = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__lowerCamelCase )
@torch.no_grad()
def _a ( _snake_case , _snake_case , _snake_case=None , _snake_case=None , _snake_case=True , _snake_case=False ):
"""simple docstring"""
if config_path is not None:
UpperCAmelCase = WavaVecaConfig.from_pretrained(__lowerCamelCase )
else:
UpperCAmelCase = WavaVecaConfig()
if is_seq_class:
UpperCAmelCase = read_txt_into_dict(__lowerCamelCase )
UpperCAmelCase = idalabel
UpperCAmelCase = WavaVecaForSequenceClassification(__lowerCamelCase )
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
feature_extractor.save_pretrained(__lowerCamelCase )
elif is_finetuned:
if dict_path:
UpperCAmelCase = Dictionary.load(__lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCAmelCase = target_dict.pad_index
UpperCAmelCase = target_dict.bos_index
UpperCAmelCase = target_dict.eos_index
UpperCAmelCase = len(target_dict.symbols )
UpperCAmelCase = os.path.join(__lowerCamelCase , """vocab.json""" )
if not os.path.isdir(__lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__lowerCamelCase ) )
return
os.makedirs(__lowerCamelCase , exist_ok=__lowerCamelCase )
UpperCAmelCase = target_dict.indices
# fairseq has the <pad> and <s> switched
UpperCAmelCase = 0
UpperCAmelCase = 1
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__lowerCamelCase , __lowerCamelCase )
UpperCAmelCase = WavaVecaCTCTokenizer(
__lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__lowerCamelCase , )
UpperCAmelCase = True if config.feat_extract_norm == "layer" else False
UpperCAmelCase = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__lowerCamelCase , return_attention_mask=__lowerCamelCase , )
UpperCAmelCase = WavaVecaProcessor(feature_extractor=__lowerCamelCase , tokenizer=__lowerCamelCase )
processor.save_pretrained(__lowerCamelCase )
UpperCAmelCase = WavaVecaForCTC(__lowerCamelCase )
else:
UpperCAmelCase = WavaVecaForPreTraining(__lowerCamelCase )
if is_finetuned or is_seq_class:
UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
UpperCAmelCase = argparse.Namespace(task="""audio_pretraining""" )
UpperCAmelCase = fairseq.tasks.setup_task(__lowerCamelCase )
UpperCAmelCase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__lowerCamelCase )
UpperCAmelCase = model[0].eval()
recursively_load_weights(__lowerCamelCase , __lowerCamelCase , not is_finetuned )
hf_wavavec.save_pretrained(__lowerCamelCase )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
parser.add_argument(
"""--is_seq_class""",
action="""store_true""",
help="""Whether the model to convert is a fine-tuned sequence classification model or not""",
)
_UpperCamelCase = parser.parse_args()
_UpperCamelCase = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
_UpperCamelCase = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
_UpperCamelCase = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def _a ( _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = create_model(
"""HTSAT-tiny""" , """roberta""" , _snake_case , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=_snake_case , fusion_type="""aff_2d""" if enable_fusion else None , )
return model, model_cfg
def _a ( _snake_case ):
"""simple docstring"""
UpperCAmelCase = {}
UpperCAmelCase = R""".*sequential.(\d+).*"""
UpperCAmelCase = R""".*_projection.(\d+).*"""
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
UpperCAmelCase = key.replace(_snake_case , _snake_case )
if re.match(_snake_case , _snake_case ):
# replace sequential layers with list
UpperCAmelCase = re.match(_snake_case , _snake_case ).group(1 )
UpperCAmelCase = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(_snake_case )//3}.linear.''' )
elif re.match(_snake_case , _snake_case ):
UpperCAmelCase = int(re.match(_snake_case , _snake_case ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
UpperCAmelCase = 1 if projecton_layer == 0 else 2
UpperCAmelCase = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
UpperCAmelCase = value
UpperCAmelCase = mixed_qkv.size(0 ) // 3
UpperCAmelCase = mixed_qkv[:qkv_dim]
UpperCAmelCase = mixed_qkv[qkv_dim : qkv_dim * 2]
UpperCAmelCase = mixed_qkv[qkv_dim * 2 :]
UpperCAmelCase = query_layer
UpperCAmelCase = key_layer
UpperCAmelCase = value_layer
else:
UpperCAmelCase = value
return model_state_dict
def _a ( _snake_case , _snake_case , _snake_case , _snake_case=False ):
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase = init_clap(_snake_case , enable_fusion=_snake_case )
clap_model.eval()
UpperCAmelCase = clap_model.state_dict()
UpperCAmelCase = rename_state_dict(_snake_case )
UpperCAmelCase = ClapConfig()
UpperCAmelCase = enable_fusion
UpperCAmelCase = ClapModel(_snake_case )
# ignore the spectrogram embedding layer
model.load_state_dict(_snake_case , strict=_snake_case )
model.save_pretrained(_snake_case )
transformers_config.save_pretrained(_snake_case )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
_UpperCamelCase = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""Utilities for splitting a model's attention blocks across multiple devices."""
from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)

    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map: "
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return a dictionary mapping each device to an evenly sized chunk of layer indices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
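
A small usage sketch (hypothetical device ids, not part of the original module): 12 layers over 4 devices gives ceil(12 / 4) = 3 layers per device.

device_map = get_device_map(n_layers=12, devices=list(range(4)))
# -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # no duplicates, gaps, or extras, so no error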
| 321
|
"""Project Euler problem 129: https://projecteuler.net/problem=129

A(n) is the length of the smallest repunit 111...1 divisible by n.
Find the least n for which A(n) first exceeds one million.
"""


def least_divisible_repunit(divisor: int) -> int:
    """Return A(divisor), or 0 when no repunit is divisible by it (gcd(divisor, 10) != 1)."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        # R(k+1) = 10 * R(k) + 1, computed modulo the divisor
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_000_000) -> int:
    """Return the least odd divisor, coprime to 10, whose A-value exceeds `limit`."""
    # A(n) <= n when gcd(n, 10) == 1, so every n <= limit fails the test below
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor


if __name__ == "__main__":
    print(f"{solution() = }")
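
A quick sanity check (illustrative, not part of the original solution): the smallest repunit divisible by 7 is 111111 = 7 * 15873, so A(7) = 6.

assert least_divisible_repunit(7) == 6
assert int("1" * 6) % 7 == 0  # 111111 is a multiple of 7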
| 321
| 1
|
import re


def indian_phone_validator(phone: str) -> bool:
    """Validate an Indian mobile number, with an optional +91 / 91 / 0 prefix."""
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
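
A few illustrative checks (made-up inputs, not from the original snippet):

assert indian_phone_validator("+918827897895")
assert indian_phone_validator("9876543210")      # bare 10-digit number is accepted
assert not indian_phone_validator("1234567890")  # first digit must be 7, 8 or 9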
| 362
|
import argparse
import copy


def generate_neighbours(path):
    """Parse the TSP data file into {node: [[neighbour, distance], ...]}."""
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
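
# Expected input format (inferred from the parsing above): one undirected edge per
# line as "node_a node_b distance", e.g.
#
#     a b 20
#     a c 18
#     b c 30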
def generate_first_solution(path, dict_of_neighbours):
    """Build a greedy nearest-neighbour starting tour and its total distance."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0

    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    # the final hop added the 10000 sentinel; replace it with the real edge back to the start
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate all tours reachable by swapping two interior nodes, cheapest first."""
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
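
# Each entry of the returned list is a candidate tour with its total distance
# appended as the last element, e.g. ["a", "c", "b", "d", "a", 96], and the list
# is sorted ascending by that distance.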
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Run tabu search for `iters` iterations with a tabu list of at most `size` swaps."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            # locate the pair of nodes whose swap produced this neighbour
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                # the swap is tabu; fall back to the next-best neighbour
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description='''Tabu Search''')
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
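
For quick experimentation, a tiny in-memory instance (hypothetical data, in the same string-distance format that generate_neighbours produces) can exercise the search directly:

dict_of_neighbours = {
    "a": [["b", "20"], ["c", "18"], ["d", "22"]],
    "b": [["a", "20"], ["c", "30"], ["d", "26"]],
    "c": [["a", "18"], ["b", "30"], ["d", "24"]],
    "d": [["a", "22"], ["b", "26"], ["c", "24"]],
}
first_solution = ["a", "b", "c", "d", "a"]  # a starting tour returning to node "a"
first_distance = 20 + 30 + 24 + 22          # = 96
best_sol, best_cost = tabu_search(
    first_solution, first_distance, dict_of_neighbours, iters=3, size=5
)
# should settle on the cheaper tour a -> b -> d -> c -> a with distance 88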
| 218
| 0
|