| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (82 to 53.2k chars) | int64 (0 to 721) | string (91 to 41.9k chars) | int64 (0 to 699) | int64 (0 or 1) |
import qiskit


def single_qubit_measure(qubits: int, classical_bits: int) -> qiskit.result.counts.Counts:
    """Measure a single qubit and return the observed counts."""
    simulator = qiskit.Aer.get_backend('aer_simulator')
    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Map the quantum measurement to the classical bits
    circuit.measure([0], [0])
    # Execute the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)


if __name__ == "__main__":
    print(f"Total count for various states are: {single_qubit_measure(1, 1)}")
| code_codestyle: 31 |
from ..utils import DummyObject, requires_backends
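# Placeholder classes that raise an informative error when the `flax` and `transformers` backends are not installed.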
class lowercase__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']
def __init__( self : int , *_lowercase : Union[str, Any] , **_lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : str , *_lowercase : Tuple , **_lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : Optional[int] , *_lowercase : List[Any] , **_lowercase : Dict ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
class lowercase__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']
def __init__( self : Optional[Any] , *_lowercase : int , **_lowercase : List[Any] ):
"""simple docstring"""
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : Optional[Any] , *_lowercase : Optional[Any] , **_lowercase : Any ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : str , *_lowercase : Any , **_lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
class lowercase__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']
def __init__( self : str , *_lowercase : Union[str, Any] , **_lowercase : List[str] ):
"""simple docstring"""
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : Any , *_lowercase : Any , **_lowercase : List[str] ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : Tuple , *_lowercase : str , **_lowercase : Optional[Any] ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
class lowercase__ ( metaclass=DummyObject ):
    _backends = ['flax', 'transformers']
def __init__( self : Any , *_lowercase : Dict , **_lowercase : str ):
"""simple docstring"""
requires_backends(self , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : List[str] , *_lowercase : Tuple , **_lowercase : int ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
@classmethod
def _UpperCAmelCase ( cls : Any , *_lowercase : str , **_lowercase : Optional[int] ):
"""simple docstring"""
requires_backends(cls , ["flax", "transformers"] )
| style_context_codestyle: 475 | label: 0 |
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# check whether the graph has an Euler path or an Euler circuit
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has a Euler path")
    if check == 1:
        print("graph has a Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degree is zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
| code_codestyle: 83 |
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
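# Model tester: builds a small DeBERTa-v2 config plus random input tensors and checks each task head's output shapes.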
class a ( __lowerCAmelCase ):
"""simple docstring"""
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=13 , lowerCAmelCase_=7 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=32 , lowerCAmelCase_=5 , lowerCAmelCase_=4 , lowerCAmelCase_=37 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.1 , lowerCAmelCase_=0.1 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=False , lowerCAmelCase_=True , lowerCAmelCase_="None" , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ) -> Union[str, Any]:
_A = parent
_A = batch_size
_A = seq_length
_A = is_training
_A = use_input_mask
_A = use_token_type_ids
_A = use_labels
_A = vocab_size
_A = hidden_size
_A = num_hidden_layers
_A = num_attention_heads
_A = intermediate_size
_A = hidden_act
_A = hidden_dropout_prob
_A = attention_probs_dropout_prob
_A = max_position_embeddings
_A = type_vocab_size
_A = type_sequence_label_size
_A = initializer_range
_A = num_labels
_A = num_choices
_A = relative_attention
_A = position_biased_input
_A = pos_att_type
_A = scope
def UpperCAmelCase ( self ) -> Dict:
_A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_A = None
if self.use_input_mask:
_A = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_A = None
if self.use_token_type_ids:
_A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_A = None
_A = None
_A = None
if self.use_labels:
_A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_A = ids_tensor([self.batch_size] , self.num_choices )
_A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase ( self ) -> Optional[int]:
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def UpperCAmelCase ( self , lowerCAmelCase_ ) -> Any:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_A = DebertaVaModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )[0]
_A = model(lowerCAmelCase_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> List[str]:
_A = DebertaVaForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_A = self.num_labels
_A = DebertaVaForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(lowerCAmelCase_ )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = self.num_labels
_A = DebertaVaForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]:
_A = DebertaVaForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_A = DebertaVaForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_A = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_A = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.prepare_config_and_inputs()
(
(
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) , (
_A
) ,
) = config_and_inputs
_A = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :int = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
lowerCamelCase :str = (
{
'''feature-extraction''': DebertaVaModel,
'''fill-mask''': DebertaVaForMaskedLM,
'''question-answering''': DebertaVaForQuestionAnswering,
'''text-classification''': DebertaVaForSequenceClassification,
'''token-classification''': DebertaVaForTokenClassification,
'''zero-shot''': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCamelCase :str = True
lowerCamelCase :Union[str, Any] = False
lowerCamelCase :Optional[int] = False
lowerCamelCase :List[str] = False
lowerCamelCase :str = False
def UpperCAmelCase ( self ) -> Optional[int]:
_A = DebertaVaModelTester(self )
_A = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def UpperCAmelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def UpperCAmelCase ( self ) -> List[str]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Any:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> int:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Dict:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*lowerCAmelCase_ )
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*lowerCAmelCase_ )
@slow
def UpperCAmelCase ( self ) -> Any:
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = DebertaVaModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class a ( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def UpperCAmelCase ( self ) -> int:
pass
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
_A = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] )
_A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_A = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
# compare the actual values for a slice.
_A = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , lowerCAmelCase_ , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| style_context_codestyle: 83 | label: 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE_: Any =logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_: Tuple ='▁'
SCREAMING_SNAKE_CASE_: List[Any] ={
'vocab_file': 'vocab.json',
'spm_file': 'sentencepiece.bpe.model',
'tokenizer_config_file': 'tokenizer_config.json',
}
SCREAMING_SNAKE_CASE_: Any ={
'vocab_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json',
},
'spm_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model',
},
'tokenizer_config_file': {
'facebook/m2m100_418M': 'https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json',
'facebook/m2m100_1.2B': 'https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json',
},
}
SCREAMING_SNAKE_CASE_: int ={
'facebook/m2m100_418M': 10_24,
}
# fmt: off
SCREAMING_SNAKE_CASE_: Union[str, Any] ={
'm2m100': ['af', 'am', 'ar', 'ast', 'az', 'ba', 'be', 'bg', 'bn', 'br', 'bs', 'ca', 'ceb', 'cs', 'cy', 'da', 'de', 'el', 'en', 'es', 'et', 'fa', 'ff', 'fi', 'fr', 'fy', 'ga', 'gd', 'gl', 'gu', 'ha', 'he', 'hi', 'hr', 'ht', 'hu', 'hy', 'id', 'ig', 'ilo', 'is', 'it', 'ja', 'jv', 'ka', 'kk', 'km', 'kn', 'ko', 'lb', 'lg', 'ln', 'lo', 'lt', 'lv', 'mg', 'mk', 'ml', 'mn', 'mr', 'ms', 'my', 'ne', 'nl', 'no', 'ns', 'oc', 'or', 'pa', 'pl', 'ps', 'pt', 'ro', 'ru', 'sd', 'si', 'sk', 'sl', 'so', 'sq', 'sr', 'ss', 'su', 'sv', 'sw', 'ta', 'th', 'tl', 'tn', 'tr', 'uk', 'ur', 'uz', 'vi', 'wo', 'xh', 'yi', 'yo', 'zh', 'zu'],
'wmt21': ['en', 'ha', 'is', 'ja', 'cs', 'ru', 'zh', 'de']
}
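# SentencePiece/vocab-file based tokenizer for M2M100; each supported language code is mapped to a special __xx__ token appended after the base vocabulary.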
class __A ( UpperCamelCase__ ):
a__ : Any = VOCAB_FILES_NAMES
a__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a__ : str = PRETRAINED_VOCAB_FILES_MAP
a__ : Optional[Any] = ["""input_ids""", """attention_mask"""]
a__ : List[int] = []
a__ : List[int] = []
def __init__(self : List[str] , __a : List[str] , __a : Any , __a : Optional[int]=None , __a : Optional[int]=None , __a : Dict="<s>" , __a : Optional[Any]="</s>" , __a : List[str]="</s>" , __a : Union[str, Any]="<pad>" , __a : str="<unk>" , __a : str="m2m100" , __a : Optional[Dict[str, Any]] = None , __a : int=8 , **__a : int , ):
UpperCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
UpperCAmelCase_ = language_codes
UpperCAmelCase_ = FAIRSEQ_LANGUAGE_CODES[language_codes]
UpperCAmelCase_ = {lang_code: f"""__{lang_code}__""" for lang_code in fairseq_language_code}
UpperCAmelCase_ = kwargs.get("additional_special_tokens" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(__a )
for lang_code in fairseq_language_code
if self.get_lang_token(__a ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=__a , tgt_lang=__a , bos_token=__a , eos_token=__a , sep_token=__a , unk_token=__a , pad_token=__a , language_codes=__a , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=__a , **__a , )
UpperCAmelCase_ = vocab_file
UpperCAmelCase_ = load_json(__a )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = spm_file
UpperCAmelCase_ = load_spm(__a , self.sp_model_kwargs )
UpperCAmelCase_ = len(self.encoder )
UpperCAmelCase_ = {
self.get_lang_token(__a ): self.encoder_size + i for i, lang_code in enumerate(__a )
}
UpperCAmelCase_ = {lang_code: self.encoder_size + i for i, lang_code in enumerate(__a )}
UpperCAmelCase_ = {v: k for k, v in self.lang_token_to_id.items()}
UpperCAmelCase_ = src_lang if src_lang is not None else "en"
UpperCAmelCase_ = tgt_lang
UpperCAmelCase_ = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
UpperCAmelCase_ = num_madeup_words
@property
def _lowercase (self : List[str] ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def _lowercase (self : str ):
return self._src_lang
@src_lang.setter
def _lowercase (self : Optional[int] , __a : str ):
UpperCAmelCase_ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowercase (self : List[Any] , __a : str ):
return self.sp_model.encode(__a , out_type=__a )
def _lowercase (self : str , __a : str ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(__a , self.encoder[self.unk_token] )
def _lowercase (self : Union[str, Any] , __a : int ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(__a , self.unk_token )
def _lowercase (self : List[str] , __a : Union[str, Any] ):
UpperCAmelCase_ = []
UpperCAmelCase_ = ""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__a ) + token
UpperCAmelCase_ = []
else:
current_sub_tokens.append(__a )
out_string += self.sp_model.decode(__a )
return out_string.strip()
def _lowercase (self : List[str] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
UpperCAmelCase_ = [1] * len(self.prefix_tokens )
UpperCAmelCase_ = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__a )) + suffix_ones
return prefix_ones + ([0] * len(__a )) + ([0] * len(__a )) + suffix_ones
def _lowercase (self : List[Any] , __a : List[int] , __a : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = {self.convert_ids_to_tokens(__a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self : Tuple ):
UpperCAmelCase_ = self.__dict__.copy()
UpperCAmelCase_ = None
return state
def __setstate__(self : Tuple , __a : Dict ):
UpperCAmelCase_ = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = load_spm(self.spm_file , self.sp_model_kwargs )
def _lowercase (self : Tuple , __a : str , __a : Optional[str] = None ):
UpperCAmelCase_ = Path(__a )
if not save_dir.is_dir():
raise OSError(f"""{save_directory} should be a directory""" )
UpperCAmelCase_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["vocab_file"]
)
UpperCAmelCase_ = save_dir / (
(filename_prefix + "-" if filename_prefix else "") + self.vocab_files_names["spm_file"]
)
save_json(self.encoder , __a )
if os.path.abspath(self.spm_file ) != os.path.abspath(__a ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , __a )
elif not os.path.isfile(self.spm_file ):
with open(__a , "wb" ) as fi:
UpperCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(__a )
return (str(__a ), str(__a ))
def _lowercase (self : int , __a : List[str] , __a : str = "en" , __a : Optional[List[str]] = None , __a : str = "ro" , **__a : Optional[Any] , ):
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(__a , __a , **__a )
def _lowercase (self : str , __a : List[Any] , __a : Optional[str] , __a : Optional[str] , **__a : Tuple ):
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
UpperCAmelCase_ = src_lang
UpperCAmelCase_ = self(__a , add_special_tokens=__a , **__a )
UpperCAmelCase_ = self.get_lang_id(__a )
UpperCAmelCase_ = tgt_lang_id
return inputs
def _lowercase (self : int ):
self.set_src_lang_special_tokens(self.src_lang )
def _lowercase (self : Tuple ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowercase (self : int , __a : str ):
UpperCAmelCase_ = self.get_lang_token(__a )
UpperCAmelCase_ = self.lang_token_to_id[lang_token]
UpperCAmelCase_ = [self.cur_lang_id]
UpperCAmelCase_ = [self.eos_token_id]
def _lowercase (self : Optional[Any] , __a : str ):
UpperCAmelCase_ = self.get_lang_token(__a )
UpperCAmelCase_ = self.lang_token_to_id[lang_token]
UpperCAmelCase_ = [self.cur_lang_id]
UpperCAmelCase_ = [self.eos_token_id]
def _lowercase (self : List[Any] , __a : str ):
return self.lang_code_to_token[lang]
def _lowercase (self : str , __a : str ):
UpperCAmelCase_ = self.get_lang_token(__a )
return self.lang_token_to_id[lang_token]
def lowerCAmelCase_ ( snake_case_ : str , snake_case_ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
'''simple docstring'''
UpperCAmelCase_ = sentencepiece.SentencePieceProcessor(**snake_case_ )
spm.Load(str(snake_case_ ) )
return spm
def lowerCAmelCase_ ( snake_case_ : str ) -> Union[Dict, List]:
'''simple docstring'''
with open(snake_case_ , "r" ) as f:
return json.load(snake_case_ )
def lowerCAmelCase_ ( snake_case_ : Tuple , snake_case_ : str ) -> None:
'''simple docstring'''
with open(snake_case_ , "w" ) as f:
json.dump(snake_case_ , snake_case_ , indent=2 )
| code_codestyle: 78 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| style_context_codestyle: 196 | label: 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
SCREAMING_SNAKE_CASE = """bart"""
SCREAMING_SNAKE_CASE = True
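# Streamlit-cached loaders: the dense question retriever is loaded only when LOAD_DENSE_INDEX is set, plus the BART answer-generation model.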
@st.cache(allow_output_mutation=UpperCAmelCase_ )
def lowerCamelCase__ ( )-> Union[str, Any]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
UpperCamelCase = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased" )
UpperCamelCase = AutoModel.from_pretrained("yjernite/retribert-base-uncased" ).to("cuda:0" )
UpperCamelCase = qar_model.eval()
else:
UpperCamelCase , UpperCamelCase = (None, None)
if MODEL_TYPE == "bart":
UpperCamelCase = AutoTokenizer.from_pretrained("yjernite/bart_eli5" )
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("yjernite/bart_eli5" ).to("cuda:0" )
UpperCamelCase = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth" )
sas_model.load_state_dict(save_dict["model"] )
UpperCamelCase = sas_model.eval()
else:
UpperCamelCase , UpperCamelCase = make_qa_sas_model(
model_name="t5-small" , from_file="seq2seq_models/eli5_t5_model_1024_4.pth" , device="cuda:0" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=UpperCAmelCase_ )
def lowerCamelCase__ ( )-> Optional[int]:
"""simple docstring"""
if LOAD_DENSE_INDEX:
UpperCamelCase = faiss.StandardGpuResources()
UpperCamelCase = datasets.load_dataset(path="wiki_snippets" , name="wiki40b_en_100_0" )["train"]
UpperCamelCase = np.memmap(
"wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat" , dtype="float32" , mode="r" , shape=(wikiaab_passages.num_rows, 1_28) , )
UpperCamelCase = faiss.IndexFlatIP(1_28 )
UpperCamelCase = faiss.index_cpu_to_gpu(UpperCAmelCase_ , 1 , UpperCAmelCase_ )
wikiaab_gpu_index_flat.add(UpperCAmelCase_ ) # TODO fix for larger GPU
else:
UpperCamelCase , UpperCamelCase = (None, None)
UpperCamelCase = Elasticsearch([{"host": "localhost", "port": "9200"}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=UpperCAmelCase_ )
def lowerCamelCase__ ( )-> Optional[Any]:
"""simple docstring"""
UpperCamelCase = datasets.load_dataset("eli5" , name="LFQA_reddit" )
UpperCamelCase = elia["train_eli5"]
UpperCamelCase = np.memmap(
"eli5_questions_reps.dat" , dtype="float32" , mode="r" , shape=(elia_train.num_rows, 1_28) )
UpperCamelCase = faiss.IndexFlatIP(1_28 )
eli5_train_q_index.add(UpperCAmelCase_ )
return (elia_train, eli5_train_q_index)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = load_indexes()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = load_models()
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = load_train_data()
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_=10 )-> str:
"""simple docstring"""
UpperCamelCase = embed_questions_for_retrieval([question] , UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase , UpperCamelCase = eli5_train_q_index.search(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = [elia_train[int(UpperCAmelCase_ )] for i in I[0]]
return nn_examples
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_="wiki40b" , UpperCAmelCase_="dense" , UpperCAmelCase_=10 )-> List[str]:
"""simple docstring"""
if source == "none":
UpperCamelCase , UpperCamelCase = (" <P> ".join(["" for _ in range(11 )] ).strip(), [])
else:
if method == "dense":
UpperCamelCase , UpperCamelCase = query_qa_dense_index(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
else:
UpperCamelCase , UpperCamelCase = query_es_index(
UpperCAmelCase_ , UpperCAmelCase_ , index_name="english_wiki40b_snippets_100w" , n_results=UpperCAmelCase_ , )
UpperCamelCase = [
(res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst
]
UpperCamelCase = "question: {} context: {}".format(UpperCAmelCase_ , UpperCAmelCase_ )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda UpperCAmelCase_ : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda UpperCAmelCase_ : None),
} )
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=64 , UpperCAmelCase_=2_56 , UpperCAmelCase_=False , UpperCAmelCase_=2 , UpperCAmelCase_=0.95 , UpperCAmelCase_=0.8 )-> int:
"""simple docstring"""
with torch.no_grad():
UpperCamelCase = qa_sas_generate(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , num_answers=1 , num_beams=UpperCAmelCase_ , min_len=UpperCAmelCase_ , max_len=UpperCAmelCase_ , do_sample=UpperCAmelCase_ , temp=UpperCAmelCase_ , top_p=UpperCAmelCase_ , top_k=UpperCAmelCase_ , max_input_length=10_24 , device="cuda:0" , )[0]
return (answer, support_list)
st.title("""Long Form Question Answering with ELI5""")
# Start sidebar
SCREAMING_SNAKE_CASE = """<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"""
SCREAMING_SNAKE_CASE = """
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class=\"img-container\"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
""" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
SCREAMING_SNAKE_CASE = """
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
"""
st.sidebar.markdown(description, unsafe_allow_html=True)
SCREAMING_SNAKE_CASE = [
"""Answer the question""",
"""View the retrieved document only""",
"""View the most similar ELI5 question and answer""",
"""Show me everything, please!""",
]
SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Demo options""")
if demo_options:
SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
action_list,
index=3,
)
SCREAMING_SNAKE_CASE = action_list.index(action_st)
SCREAMING_SNAKE_CASE = st.sidebar.selectbox(
"""""",
["""Show full text of passages""", """Show passage section titles"""],
index=0,
)
SCREAMING_SNAKE_CASE = show_type == """Show full text of passages"""
else:
SCREAMING_SNAKE_CASE = 3
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Retrieval options""")
if retrieval_options:
SCREAMING_SNAKE_CASE = """
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
"""
st.sidebar.markdown(retriever_info)
SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia format should the model use?""", ["""wiki40b""", """none"""])
SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Which Wikipedia indexer should the model use?""", ["""dense""", """sparse""", """mixed"""])
else:
SCREAMING_SNAKE_CASE = """wiki40b"""
SCREAMING_SNAKE_CASE = """dense"""
SCREAMING_SNAKE_CASE = """beam"""
SCREAMING_SNAKE_CASE = 2
SCREAMING_SNAKE_CASE = 64
SCREAMING_SNAKE_CASE = 256
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = st.sidebar.checkbox("""Generation options""")
if generate_options:
SCREAMING_SNAKE_CASE = """
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder's output probabilities.
"""
st.sidebar.markdown(generate_info)
SCREAMING_SNAKE_CASE = st.sidebar.selectbox("""Would you like to use beam search or sample an answer?""", ["""beam""", """sampled"""])
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Minimum generation length""", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Maximum generation length""", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
SCREAMING_SNAKE_CASE = st.sidebar.slider("""Beam size""", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Nucleus sampling p""", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE = st.sidebar.slider(
"""Temperature""", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
SCREAMING_SNAKE_CASE = None
# start main text
SCREAMING_SNAKE_CASE = [
"""<MY QUESTION>""",
"""How do people make chocolate?""",
"""Why do we get a fever when we are sick?""",
"""How can different animals perceive different colors?""",
"""What is natural language processing?""",
"""What's the best way to treat a sunburn?""",
"""What exactly are vitamins ?""",
"""How does nuclear energy provide electricity?""",
"""What's the difference between viruses and bacteria?""",
"""Why are flutes classified as woodwinds when most of them are made out of metal ?""",
"""Why do people like drinking coffee even though it tastes so bad?""",
"""What happens when wine ages? How does it make the wine taste better?""",
"""If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?""",
"""How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?""",
"""How does New Zealand have so many large bird predators?""",
]
SCREAMING_SNAKE_CASE = st.selectbox(
"""What would you like to ask? ---- select <MY QUESTION> to enter a new query""",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
SCREAMING_SNAKE_CASE = st.text_input("""Enter your question here:""", """""")
else:
SCREAMING_SNAKE_CASE = question_s
if st.button("""Show me!"""):
if action in [0, 1, 3]:
if index_type == "mixed":
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""dense""", n_results=10)
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method="""sparse""", n_results=10)
SCREAMING_SNAKE_CASE = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
SCREAMING_SNAKE_CASE = support_list[:10]
SCREAMING_SNAKE_CASE = """<P> """ + """ <P> """.join([res[-1] for res in support_list])
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == """sampled"""),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("""### The model generated answer is:""")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("""--- \n ### The model is drawing information from the following Wikipedia passages:""")
for i, res in enumerate(support_list):
SCREAMING_SNAKE_CASE = """https://en.wikipedia.org/wiki/{}""".format(res[0].replace(""" """, """_"""))
SCREAMING_SNAKE_CASE = res[1].strip()
if sec_titles == "":
SCREAMING_SNAKE_CASE = """[{}]({})""".format(res[0], wiki_url)
else:
SCREAMING_SNAKE_CASE = sec_titles.split(""" & """)
SCREAMING_SNAKE_CASE = """ & """.join(
["""[{}]({}#{})""".format(sec.strip(), wiki_url, sec.strip().replace(""" """, """_""")) for sec in sec_list]
)
st.markdown(
"""{0:02d} - **Article**: {1:<18} <br> _Section_: {2}""".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"""> <span style=\"font-family:arial; font-size:10pt;\">""" + res[-1] + """</span>""", unsafe_allow_html=True
)
if action in [2, 3]:
SCREAMING_SNAKE_CASE = find_nearest_training(question)
SCREAMING_SNAKE_CASE = nn_train_list[0]
st.markdown(
"""--- \n ### The most similar question in the ELI5 training set was: \n\n {}""".format(train_exple["""title"""])
)
SCREAMING_SNAKE_CASE = [
"""{}. {}""".format(i + 1, """ \n""".join([line.strip() for line in ans.split("""\n""") if line.strip() != """"""]))
for i, (ans, sc) in enumerate(zip(train_exple["""answers"""]["""text"""], train_exple["""answers"""]["""score"""]))
if i == 0 or sc > 2
]
st.markdown("""##### Its answers were: \n\n {}""".format("""\n""".join(answers_st)))
SCREAMING_SNAKE_CASE = """
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
"""
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| code_codestyle: 556 |
"""simple docstring"""
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow


if is_torch_available():
    import torch

    from transformers import XLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| style_context_codestyle: 556 | label: 1 |
'''simple docstring'''
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow

from ..bert.test_tokenization_bert import BertTokenizationTest


@require_tokenizers
class DistilBertTokenizationTest(BertTokenizationTest):
    tokenizer_class = DistilBertTokenizer
    rust_tokenizer_class = DistilBertTokenizerFast
    test_rust_tokenizer = True

    @slow
    def test_sequence_builders(self):
        tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
| code_codestyle: 98 |
'''simple docstring'''
import math_equivalence  # From: git+https://github.com/hendrycks/math.git
import datasets

_CITATION = '\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n'
_DESCRIPTION = '\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting "1/2" to "\\frac{1}{2}") and then computes accuracy.\n'
_KWARGS_DESCRIPTION = R'\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting "1/2" to "\\frac{1}{2}")\n\nExamples:\n >>> metric = datasets.load_metric("competition_math")\n >>> results = metric.compute(references=["\\frac{1}{2}"], predictions=["1/2"])\n >>> print(results)\n {\'accuracy\': 1.0}\n'


@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CompetitionMathMetric(datasets.Metric):
    """Accuracy metric for the MATH dataset (exact match after canonicalizing LaTeX answers)."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string"),
                    "references": datasets.Value("string"),
                }
            ),
            homepage="https://github.com/hendrycks/math",
            codebase_urls=["https://github.com/hendrycks/math"],
        )

    def _compute(self, predictions, references):
        n_correct = 0.0
        for i, j in zip(predictions, references):
            n_correct += 1.0 if math_equivalence.is_equiv(i, j) else 0.0
        accuracy = n_correct / len(predictions)
        return {
            "accuracy": accuracy,
        }
| style_context_codestyle: 98 | label: 1 |
'''simple docstring'''
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
_UpperCamelCase : Dict = logging.getLogger(__name__)
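# Entry point: replaces the model's nn.Linear layers with bitsandbytes 4-/8-bit layers according to the quantization config, loads the checkpoint weights, and dispatches the model onto the available devices.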
def __UpperCAmelCase ( A : torch.nn.Module , A : BnbQuantizationConfig , A : Union[str, os.PathLike] = None , A : Optional[Dict[str, Union[int, str, torch.device]]] = None , A : Optional[List[str]] = None , A : Optional[Dict[Union[int, str], Union[int, str]]] = None , A : Optional[Union[str, os.PathLike]] = None , A : bool = False , ) -> Optional[Any]:
UpperCAmelCase_ : Dict = bnb_quantization_config.load_in_abit
UpperCAmelCase_ : List[str] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
UpperCAmelCase_ : List[str] = []
# custom device map
if isinstance(A , A ) and len(device_map.keys() ) > 1:
UpperCAmelCase_ : Any = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCAmelCase_ : Any = get_keys_to_not_convert(A )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(A )
UpperCAmelCase_ : Optional[Any] = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCAmelCase_ : List[str] = []
UpperCAmelCase_ : str = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(A )
# compatibility with peft
UpperCAmelCase_ : Any = load_in_abit
UpperCAmelCase_ : List[str] = load_in_abit
UpperCAmelCase_ : Tuple = get_parameter_device(A )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
UpperCAmelCase_ : List[str] = replace_with_bnb_layers(A , A , modules_to_not_convert=A )
# convert param to the right dtype
UpperCAmelCase_ : List[Any] = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCAmelCase_ : Optional[Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
UpperCAmelCase_ : Optional[int] = getattr(A , A , A )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(A ):
param.to(A )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F"The model device type is {model_device.type}. However, cuda is needed for quantization."
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
UpperCAmelCase_ : Any = replace_with_bnb_layers(
A , A , modules_to_not_convert=A )
UpperCAmelCase_ : List[str] = get_quantized_model_device_map(
A , A , A , max_memory=A , no_split_module_classes=A , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCAmelCase_ : Any = True
UpperCAmelCase_ : Optional[Any] = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(A , device_map=A , offload_dir=A )
def __UpperCAmelCase ( A : Any , A : Optional[Any] , A : Optional[Any]=None , A : List[Any]=None , A : Any=None ) -> int:
if device_map is None:
if torch.cuda.is_available():
UpperCAmelCase_ : Any = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(A , A ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
UpperCAmelCase_ : List[str] = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCAmelCase_ : str = {}
UpperCAmelCase_ : Dict = special_dtypes
UpperCAmelCase_ : List[Any] = no_split_module_classes
UpperCAmelCase_ : str = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCAmelCase_ : Optional[int] = get_balanced_memory(
A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , )
UpperCAmelCase_ : str = max_memory
UpperCAmelCase_ : Tuple = infer_auto_device_map(A , **A )
if isinstance(A , A ):
# check if don't have any quantized module on the cpu
UpperCAmelCase_ : Optional[Any] = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCAmelCase_ : Dict = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def __UpperCAmelCase ( A : List[str] , A : Union[str, Any] , A : Optional[int]=None , A : List[str]=None ) -> Optional[int]:
if modules_to_not_convert is None:
UpperCAmelCase_ : Optional[Any] = []
UpperCAmelCase_ , UpperCAmelCase_ : List[str] = _replace_with_bnb_layers(
A , A , A , A )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __UpperCAmelCase ( A : Union[str, Any] , A : Optional[int] , A : Optional[int]=None , A : Optional[Any]=None , ) -> Any:
UpperCAmelCase_ : Dict = False
for name, module in model.named_children():
if current_key_name is None:
UpperCAmelCase_ : Optional[Any] = []
current_key_name.append(A )
if isinstance(A , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCAmelCase_ : Optional[Any] = '''.'''.join(A )
UpperCAmelCase_ : Optional[int] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCAmelCase_ : Optional[int] = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCAmelCase_ : Dict = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCAmelCase_ : List[Any] = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
UpperCAmelCase_ : List[Any] = module.weight.data
if module.bias is not None:
UpperCAmelCase_ : Optional[int] = module.bias.data
bnb_module.requires_grad_(A )
setattr(A , A , A )
UpperCAmelCase_ : int = True
if len(list(module.children() ) ) > 0:
UpperCAmelCase_ , UpperCAmelCase_ : int = _replace_with_bnb_layers(
A , A , A , A )
UpperCAmelCase_ : Optional[int] = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __UpperCAmelCase ( A : Any ) -> Union[str, Any]:
# Create a copy of the model
with init_empty_weights():
UpperCAmelCase_ : Any = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCAmelCase_ : Dict = find_tied_parameters(A )
# For compatibility with Accelerate < 0.18
if isinstance(A , A ):
UpperCAmelCase_ : int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCAmelCase_ : str = sum(A , [] )
UpperCAmelCase_ : List[Any] = len(A ) > 0
# Check if it is a base model
UpperCAmelCase_ : List[str] = False
if hasattr(A , '''base_model_prefix''' ):
UpperCAmelCase_ : str = not hasattr(A , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCAmelCase_ : str = list(model.named_children() )
UpperCAmelCase_ : List[str] = [list_modules[-1][0]]
# add last module together with tied weights
UpperCAmelCase_ : Tuple = set(A ) - set(A )
UpperCAmelCase_ : str = list(set(A ) ) + list(A )
# remove ".weight" from the keys
UpperCAmelCase_ : List[Any] = ['''.weight''', '''.bias''']
UpperCAmelCase_ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCAmelCase_ : Dict = name.replace(A , '''''' )
filtered_module_names.append(A )
return filtered_module_names
def __UpperCAmelCase ( A : Any ) -> Union[str, Any]:
for m in model.modules():
if isinstance(A , bnb.nn.Linearabit ):
return True
return False
def __UpperCAmelCase ( A : nn.Module ) -> int:
return next(parameter.parameters() ).device
def __UpperCAmelCase ( A : Union[str, Any] , A : int , A : str , A : Optional[int] , A : int , A : Optional[Any] , A : Union[str, Any] ) -> int:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(A , A , 0 , dtype=A , value=A )
UpperCAmelCase_ : List[Any] = param_name
UpperCAmelCase_ : int = model
if "." in tensor_name:
UpperCAmelCase_ : Optional[int] = tensor_name.split('''.''' )
for split in splits[:-1]:
UpperCAmelCase_ : Dict = getattr(A , A )
if new_module is None:
raise ValueError(F"{module} has no attribute {split}." )
UpperCAmelCase_ : List[str] = new_module
UpperCAmelCase_ : Tuple = splits[-1]
# offload weights
UpperCAmelCase_ : Optional[Any] = False
offload_weight(module._parameters[tensor_name] , A , A , index=A )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , )
else:
offload_weight(A , A , A , index=A )
offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A )
set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
| 216
|
'''simple docstring'''
ROMAN = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int( roman : str ) -> int:
    """Convert a Roman numeral string into its integer value."""
    vals = {'I': 1, 'V': 5, 'X': 1_0, 'L': 5_0, 'C': 1_0_0, 'D': 5_0_0, 'M': 1_0_0_0}
    total = 0
    place = 0
    while place < len(roman ):
        if (place + 1 < len(roman )) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman( number : int ) -> str:
    """Convert an integer into its Roman numeral representation."""
    result = []
    for arabic, roman in ROMAN:
        factor, number = divmod(number , arabic )
        result.append(roman * factor )
        if number == 0:
            break
    return "".join(result )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216
| 1
|
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
_snake_case = logging.get_logger('transformers.models.speecht5')
_snake_case = {
'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
_snake_case = {
'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
_snake_case = {
'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
_snake_case = {
'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
_snake_case = {
'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
_snake_case = {
'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
_snake_case = {
'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
_snake_case = {
'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
_snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
_snake_case = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_snake_case = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
_snake_case = []
_snake_case = [
'encoder.version',
'encoder.layers.*.norm_k.weight',
'encoder.layers.*.norm_k.bias',
'decoder.version',
'decoder.layers.*.norm_k.weight',
'decoder.layers.*.norm_k.bias',
'decoder.pos_emb.pe_k',
'speech_encoder_prenet.embed_positions._float_tensor',
'text_decoder_prenet.embed_positions._float_tensor',
]
_snake_case = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'speech_decoder_prenet.*',
'speech_decoder_postnet.*',
]
_snake_case = IGNORE_KEYS + [
'encoder.proj',
'speech_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
_snake_case = IGNORE_KEYS + [
'encoder.proj',
'text_encoder_prenet.*',
'text_decoder_prenet.*',
'text_decoder_postnet.*',
]
def _a ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Union[str, Any]:
"""simple docstring"""
for attribute in key.split('.' ):
__UpperCamelCase = getattr(__lowercase , __lowercase )
if weight_type is not None:
__UpperCamelCase = getattr(__lowercase , __lowercase ).shape
else:
__UpperCamelCase = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__UpperCamelCase = value
elif weight_type == "weight_g":
__UpperCamelCase = value
elif weight_type == "weight_v":
__UpperCamelCase = value
elif weight_type == "bias":
__UpperCamelCase = value
elif weight_type == "running_mean":
__UpperCamelCase = value
elif weight_type == "running_var":
__UpperCamelCase = value
elif weight_type == "num_batches_tracked":
__UpperCamelCase = value
else:
__UpperCamelCase = value
logger.info(F"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def _a ( __lowercase , __lowercase ) -> List[str]:
"""simple docstring"""
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__UpperCamelCase , __UpperCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
def _a ( __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
__UpperCamelCase = []
if task == "s2t":
__UpperCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__UpperCamelCase = MAPPING_S2T
__UpperCamelCase = IGNORE_KEYS_S2T
elif task == "t2s":
__UpperCamelCase = None
__UpperCamelCase = MAPPING_T2S
__UpperCamelCase = IGNORE_KEYS_T2S
elif task == "s2s":
__UpperCamelCase = hf_model.speechta.encoder.prenet.feature_encoder
__UpperCamelCase = MAPPING_S2S
__UpperCamelCase = IGNORE_KEYS_S2S
else:
raise ValueError(F"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(__lowercase , __lowercase ):
logger.info(F"""{name} was ignored""" )
continue
__UpperCamelCase = False
if "conv_layers" in name:
load_conv_layer(
__lowercase , __lowercase , __lowercase , __lowercase , hf_model.config.feat_extract_norm == 'group' , )
__UpperCamelCase = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__UpperCamelCase , __UpperCamelCase = key.split('.*.' )
if prefix in name and suffix in name:
__UpperCamelCase = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__UpperCamelCase = True
if "*" in mapped_key:
__UpperCamelCase = name.split(__lowercase )[0].split('.' )[-2]
__UpperCamelCase = mapped_key.replace('*' , __lowercase )
if "weight_g" in name:
__UpperCamelCase = 'weight_g'
elif "weight_v" in name:
__UpperCamelCase = 'weight_v'
elif "bias" in name:
__UpperCamelCase = 'bias'
elif "weight" in name:
__UpperCamelCase = 'weight'
elif "running_mean" in name:
__UpperCamelCase = 'running_mean'
elif "running_var" in name:
__UpperCamelCase = 'running_var'
elif "num_batches_tracked" in name:
__UpperCamelCase = 'num_batches_tracked'
else:
__UpperCamelCase = None
set_recursively(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(F"""Unused weights: {unused_weights}""" )
def _a ( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
__UpperCamelCase = full_name.split('conv_layers.' )[-1]
__UpperCamelCase = name.split('.' )
__UpperCamelCase = int(items[0] )
__UpperCamelCase = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__UpperCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__UpperCamelCase = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__UpperCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__UpperCamelCase = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__lowercase )
@torch.no_grad()
def _a ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , __lowercase=None , ) -> int:
"""simple docstring"""
if config_path is not None:
__UpperCamelCase = SpeechTaConfig.from_pretrained(__lowercase )
else:
__UpperCamelCase = SpeechTaConfig()
if task == "s2t":
__UpperCamelCase = config.max_text_positions
__UpperCamelCase = SpeechTaForSpeechToText(__lowercase )
elif task == "t2s":
__UpperCamelCase = 1876
__UpperCamelCase = 600
__UpperCamelCase = config.max_speech_positions
__UpperCamelCase = SpeechTaForTextToSpeech(__lowercase )
elif task == "s2s":
__UpperCamelCase = 1876
__UpperCamelCase = config.max_speech_positions
__UpperCamelCase = SpeechTaForSpeechToSpeech(__lowercase )
else:
raise ValueError(F"""Unknown task name: {task}""" )
if vocab_path:
__UpperCamelCase = SpeechTaTokenizer(__lowercase , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__UpperCamelCase = AddedToken('<mask>' , lstrip=__lowercase , rstrip=__lowercase )
__UpperCamelCase = mask_token
tokenizer.add_special_tokens({'mask_token': mask_token} )
tokenizer.add_tokens(['<ctc_blank>'] )
__UpperCamelCase = SpeechTaFeatureExtractor()
__UpperCamelCase = SpeechTaProcessor(tokenizer=__lowercase , feature_extractor=__lowercase )
processor.save_pretrained(__lowercase )
__UpperCamelCase = torch.load(__lowercase )
recursively_load_weights(fairseq_checkpoint['model'] , __lowercase , __lowercase )
model.save_pretrained(__lowercase )
if repo_id:
print('Pushing to the hub...' )
processor.push_to_hub(__lowercase )
model.push_to_hub(__lowercase )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'--task',
default='s2t',
type=str,
help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
_snake_case = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 383
|
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self) -> None:
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self) -> None:
        self.cpu_memory_peak = -1
        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)
            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self) -> None:
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self) -> int:
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()
    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}
    gc.collect()
    torch.cuda.empty_cache()
    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20
    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
| 383
| 1
|
"""simple docstring"""
def solution( n : int = 1_0_0_0 ) -> int:
    """Return the sum of all natural numbers below n that are multiples of 3 or 5."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 716
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a_ = logging.get_logger(__name__)
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
_A : int = ["""pixel_values"""]
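    # Image processor that optionally resizes (honouring crop_pct), center-crops, rescales and normalizes input images.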
def __init__(self , lowercase__ = True , lowercase__ = None , lowercase__ = 0.9 , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = True , lowercase__ = None , lowercase__ = 1 / 2_55 , lowercase__ = True , lowercase__ = True , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
super().__init__(**lowercase__ )
snake_case_ : Tuple = size if size is not None else {"""shortest_edge""": 2_24}
snake_case_ : Union[str, Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : str = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
snake_case_ : Dict = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : Union[str, Any] = do_resize
snake_case_ : List[str] = size
snake_case_ : str = crop_pct
snake_case_ : str = resample
snake_case_ : Optional[Any] = do_center_crop
snake_case_ : Dict = crop_size
snake_case_ : int = do_rescale
snake_case_ : Optional[int] = rescale_factor
snake_case_ : str = do_normalize
snake_case_ : str = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
snake_case_ : List[str] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , lowercase__ = PILImageResampling.BICUBIC , lowercase__ = None , **lowercase__ , ):
snake_case_ : Tuple = get_size_dict(lowercase__ , default_to_square=lowercase__ )
if "shortest_edge" not in size and ("height" not in size or "width" not in size):
raise ValueError(f'size must contain \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
if crop_pct is not None:
if "shortest_edge" in size:
snake_case_ : Optional[int] = int(size["""shortest_edge"""] / crop_pct )
elif "height" in size and "width" in size:
if size["height"] == size["width"]:
snake_case_ : Dict = int(size["""height"""] / crop_pct )
else:
snake_case_ : List[str] = (int(size["""height"""] / crop_pct ), int(size["""width"""] / crop_pct ))
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
snake_case_ : List[Any] = get_resize_output_image_size(lowercase__ , size=lowercase__ , default_to_square=lowercase__ )
else:
if "shortest_edge" in size:
snake_case_ : Optional[int] = get_resize_output_image_size(lowercase__ , size=size["""shortest_edge"""] , default_to_square=lowercase__ )
elif "height" in size and "width" in size:
snake_case_ : int = (size["""height"""], size["""width"""])
else:
raise ValueError("""Invalid size for resize: {}""".format(lowercase__ ) )
return resize(lowercase__ , size=lowercase__ , resample=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
snake_case_ : int = get_size_dict(lowercase__ )
if "height" not in size or "width" not in size:
raise ValueError(f'size must contain \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(lowercase__ , size=(size["""height"""], size["""width"""]) , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return rescale(lowercase__ , scale=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ , lowercase__ , lowercase__ = None , **lowercase__ , ):
return normalize(lowercase__ , mean=lowercase__ , std=lowercase__ , data_format=lowercase__ , **lowercase__ )
def __UpperCamelCase (self , lowercase__ , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = None , lowercase__ = ChannelDimension.FIRST , **lowercase__ , ):
snake_case_ : str = do_resize if do_resize is not None else self.do_resize
snake_case_ : Any = crop_pct if crop_pct is not None else self.crop_pct
snake_case_ : List[Any] = resample if resample is not None else self.resample
snake_case_ : str = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case_ : str = do_rescale if do_rescale is not None else self.do_rescale
snake_case_ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case_ : str = do_normalize if do_normalize is not None else self.do_normalize
snake_case_ : List[Any] = image_mean if image_mean is not None else self.image_mean
snake_case_ : int = image_std if image_std is not None else self.image_std
snake_case_ : List[Any] = size if size is not None else self.size
snake_case_ : Optional[Any] = get_size_dict(lowercase__ , default_to_square=lowercase__ )
snake_case_ : List[Any] = crop_size if crop_size is not None else self.crop_size
snake_case_ : int = get_size_dict(lowercase__ , param_name="""crop_size""" )
snake_case_ : List[str] = make_list_of_images(lowercase__ )
if not valid_images(lowercase__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and (size is None or resample is None):
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_pct is None:
raise ValueError("""Crop_pct must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
snake_case_ : int = [to_numpy_array(lowercase__ ) for image in images]
if do_resize:
snake_case_ : str = [self.resize(image=lowercase__ , size=lowercase__ , crop_pct=lowercase__ , resample=lowercase__ ) for image in images]
if do_center_crop:
snake_case_ : Optional[int] = [self.center_crop(image=lowercase__ , size=lowercase__ ) for image in images]
if do_rescale:
snake_case_ : List[Any] = [self.rescale(image=lowercase__ , scale=lowercase__ ) for image in images]
if do_normalize:
snake_case_ : Optional[Any] = [self.normalize(image=lowercase__ , mean=lowercase__ , std=lowercase__ ) for image in images]
snake_case_ : List[Any] = [to_channel_dimension_format(lowercase__ , lowercase__ ) for image in images]
snake_case_ : Dict = {"""pixel_values""": images}
return BatchFeature(data=lowercase__ , tensor_type=lowercase__ )
| 48
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
_a : List[Any] = logging.get_logger(__name__)
class DonutFeatureExtractor( DonutImageProcessor ):
    '''simple docstring'''

    def __init__( self , *args , **kwargs ) -> None:
        """simple docstring"""
        warnings.warn(
            'The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DonutImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
| 447
|
'''simple docstring'''
def method_a( boundary , steps ):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a , b , h )
    y = 0.0
    y += (h / 2.0) * f(a )
    for i in x_i:
        # print(i)
        y += h * f(i )
    y += (h / 2.0) * f(b )
    return y


def make_points( a , b , h ):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f( x ):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_a(boundary , steps )
    print(f"y = {y}" )
if __name__ == "__main__":
main()
| 447
| 1
|
"""simple docstring"""
def solution( n : int = 4000000 ) -> int:
    """Return the sum of the even Fibonacci numbers that do not exceed n."""
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a, b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 200
|
"""simple docstring"""
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'
)
__a : Union[str, Any] = None
__a : Union[str, Any] = {
'7B': 1_1008,
'13B': 1_3824,
'30B': 1_7920,
'65B': 2_2016,
'70B': 2_8672,
}
__a : List[Any] = {
'7B': 1,
'7Bf': 1,
'13B': 2,
'13Bf': 2,
'30B': 4,
'65B': 8,
'70B': 8,
'70Bf': 8,
}
def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_=1 , lowerCamelCase_=256):
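    # Compute the MLP intermediate size: scale 8n/3 by ffn_dim_multiplier and round up to a multiple of multiple_of.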
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3)) + multiple_of - 1) // multiple_of)
def SCREAMING_SNAKE_CASE ( lowerCamelCase_):
with open(lowerCamelCase_ , '''r''') as f:
return json.load(lowerCamelCase_)
def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_):
with open(lowerCamelCase_ , '''w''') as f:
json.dump(lowerCamelCase_ , lowerCamelCase_)
def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=True):
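    # Convert a (possibly sharded) LLaMA checkpoint into per-layer Hugging Face weight shards plus an index file, then reload and re-save it in Transformers format.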
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
a__ = os.path.join(lowerCamelCase_ , '''tmp''')
os.makedirs(lowerCamelCase_ , exist_ok=lowerCamelCase_)
a__ = read_json(os.path.join(lowerCamelCase_ , '''params.json'''))
a__ = NUM_SHARDS[model_size]
a__ = params['''n_layers''']
a__ = params['''n_heads''']
a__ = n_heads // num_shards
a__ = params['''dim''']
a__ = dim // n_heads
a__ = 10000.0
a__ = 1.0 / (base ** (torch.arange(0 , lowerCamelCase_ , 2).float() / dims_per_head))
if "n_kv_heads" in params:
a__ = params['''n_kv_heads'''] # for GQA / MQA
a__ = n_heads_per_shard // num_key_value_heads
a__ = dim // num_key_value_heads
else: # compatibility with other checkpoints
a__ = n_heads
a__ = n_heads_per_shard
a__ = dim
# permute for sliced rotary
def permute(lowerCamelCase_ , lowerCamelCase_=n_heads , lowerCamelCase_=dim , lowerCamelCase_=dim):
return w.view(lowerCamelCase_ , dima // n_heads // 2 , 2 , lowerCamelCase_).transpose(1 , 2).reshape(lowerCamelCase_ , lowerCamelCase_)
print(f'Fetching all parameters from the checkpoint at {input_base_path}.')
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
a__ = torch.load(os.path.join(lowerCamelCase_ , '''consolidated.00.pth''') , map_location='''cpu''')
else:
# Sharded
a__ = [
torch.load(os.path.join(lowerCamelCase_ , f'consolidated.{i:02d}.pth') , map_location='''cpu''')
for i in range(lowerCamelCase_)
]
a__ = 0
a__ = {'''weight_map''': {}}
for layer_i in range(lowerCamelCase_):
a__ = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
a__ = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight']),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight']),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
a__ = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
a__ = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
for i in range(lowerCamelCase_)
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_))
a__ = permute(
torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
for i in range(lowerCamelCase_)
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_) , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , )
a__ = torch.cat(
[
loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_)
for i in range(lowerCamelCase_)
] , dim=0 , ).reshape(lowerCamelCase_ , lowerCamelCase_)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(lowerCamelCase_)] , dim=1)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(lowerCamelCase_)] , dim=0)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(lowerCamelCase_)] , dim=1)
a__ = torch.cat(
[loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(lowerCamelCase_)] , dim=0)
a__ = inv_freq
for k, v in state_dict.items():
a__ = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_))
a__ = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
a__ = {
'''model.embed_tokens.weight''': loaded['''tok_embeddings.weight'''],
'''model.norm.weight''': loaded['''norm.weight'''],
'''lm_head.weight''': loaded['''output.weight'''],
}
else:
a__ = {
'''model.norm.weight''': loaded[0]['''norm.weight'''],
'''model.embed_tokens.weight''': torch.cat(
[loaded[i]['''tok_embeddings.weight'''] for i in range(lowerCamelCase_)] , dim=1),
'''lm_head.weight''': torch.cat([loaded[i]['''output.weight'''] for i in range(lowerCamelCase_)] , dim=0),
}
for k, v in state_dict.items():
a__ = filename
param_count += v.numel()
torch.save(lowerCamelCase_ , os.path.join(lowerCamelCase_ , lowerCamelCase_))
# Write configs
a__ = {'''total_size''': param_count * 2}
write_json(lowerCamelCase_ , os.path.join(lowerCamelCase_ , '''pytorch_model.bin.index.json'''))
a__ = params['''ffn_dim_multiplier'''] if '''ffn_dim_multiplier''' in params else 1
a__ = params['''multiple_of'''] if '''multiple_of''' in params else 256
a__ = LlamaConfig(
hidden_size=lowerCamelCase_ , intermediate_size=compute_intermediate_size(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_) , num_attention_heads=params['''n_heads'''] , num_hidden_layers=params['''n_layers'''] , rms_norm_eps=params['''norm_eps'''] , num_key_value_heads=lowerCamelCase_ , )
config.save_pretrained(lowerCamelCase_)
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print('''Loading the checkpoint in a Llama model.''')
a__ = LlamaForCausalLM.from_pretrained(lowerCamelCase_ , torch_dtype=torch.floataa , low_cpu_mem_usage=lowerCamelCase_)
# Avoid saving this as part of the config.
del model.config._name_or_path
print('''Saving in the Transformers format.''')
model.save_pretrained(lowerCamelCase_ , safe_serialization=lowerCamelCase_)
shutil.rmtree(lowerCamelCase_)
def SCREAMING_SNAKE_CASE ( lowerCamelCase_ , lowerCamelCase_):
# Initialize the tokenizer based on the `spm` model
a__ = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.')
a__ = tokenizer_class(lowerCamelCase_)
tokenizer.save_pretrained(lowerCamelCase_)
def SCREAMING_SNAKE_CASE ( ):
a__ = argparse.ArgumentParser()
parser.add_argument(
'''--input_dir''' , help='''Location of LLaMA weights, which contains tokenizer.model and model folders''' , )
parser.add_argument(
'''--model_size''' , choices=['''7B''', '''7Bf''', '''13B''', '''13Bf''', '''30B''', '''65B''', '''70B''', '''70Bf''', '''tokenizer_only'''] , )
parser.add_argument(
'''--output_dir''' , help='''Location to write HF model and tokenizer''' , )
parser.add_argument('''--safe_serialization''' , type=lowerCamelCase_ , help='''Whether or not to save using `safetensors`.''')
a__ = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
a__ = os.path.join(args.input_dir , '''tokenizer.model''')
write_tokenizer(args.output_dir , lowerCamelCase_)
if __name__ == "__main__":
main()
| 200
| 1
|
import sys
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution( n : str = N ) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n ) - 12 ):
        product = 1
        for j in range(13 ):
            product *= int(n[i + j] )
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(F'''{solution() = }''')
| 519
|
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : int , _lowerCamelCase : Collection[float] | None = None ):
'''simple docstring'''
if components is None:
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Dict = list(_lowerCamelCase )
def __len__( self : int ):
'''simple docstring'''
return len(self.__components )
def __str__( self : Any ):
'''simple docstring'''
return "(" + ",".join(map(_lowerCamelCase , self.__components ) ) + ")"
def __add__( self : Union[str, Any] , _lowerCamelCase : Vector ):
'''simple docstring'''
__lowerCamelCase : Any = len(self )
if size == len(_lowerCamelCase ):
__lowerCamelCase : List[str] = [self.__components[i] + other.component(_lowerCamelCase ) for i in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
else:
raise Exception("""must have the same size""" )
def __sub__( self : List[str] , _lowerCamelCase : Vector ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = len(self )
if size == len(_lowerCamelCase ):
__lowerCamelCase : Tuple = [self.__components[i] - other.component(_lowerCamelCase ) for i in range(_lowerCamelCase )]
return Vector(_lowerCamelCase )
else: # error case
raise Exception("""must have the same size""" )
@overload
def __mul__( self : Tuple , _lowerCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : str , _lowerCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : List[str] , _lowerCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(_lowerCamelCase , (float, int) ):
__lowerCamelCase : List[Any] = [c * other for c in self.__components]
return Vector(_lowerCamelCase )
elif isinstance(_lowerCamelCase , _lowerCamelCase ) and len(self ) == len(_lowerCamelCase ):
__lowerCamelCase : List[str] = len(self )
__lowerCamelCase : int = [self.__components[i] * other.component(_lowerCamelCase ) for i in range(_lowerCamelCase )]
return sum(_lowerCamelCase )
else: # error case
raise Exception("""invalid operand!""" )
def _snake_case ( self : Optional[Any] ):
'''simple docstring'''
return Vector(self.__components )
def _snake_case ( self : Optional[Any] , _lowerCamelCase : int ):
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ) and -len(self.__components ) <= i < len(self.__components ):
return self.__components[i]
else:
raise Exception("""index out of range""" )
def _snake_case ( self : List[str] , _lowerCamelCase : int , _lowerCamelCase : float ):
'''simple docstring'''
assert -len(self.__components ) <= pos < len(self.__components )
__lowerCamelCase : Any = value
def _snake_case ( self : Tuple ):
'''simple docstring'''
if len(self.__components ) == 0:
raise Exception("""Vector is empty""" )
__lowerCamelCase : Union[str, Any] = [c**2 for c in self.__components]
return math.sqrt(sum(_lowerCamelCase ) )
def _snake_case ( self : Dict , _lowerCamelCase : Vector , _lowerCamelCase : bool = False ):
'''simple docstring'''
__lowerCamelCase : List[str] = self * other
__lowerCamelCase : List[Any] = self.euclidean_length() * other.euclidean_length()
if deg:
return math.degrees(math.acos(num / den ) )
else:
return math.acos(num / den )
def _UpperCAmelCase ( UpperCAmelCase : int ):
"""simple docstring"""
assert isinstance(UpperCAmelCase , UpperCAmelCase )
return Vector([0] * dimension )
def _UpperCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int ):
"""simple docstring"""
assert isinstance(UpperCAmelCase , UpperCAmelCase ) and (isinstance(UpperCAmelCase , UpperCAmelCase ))
__lowerCamelCase : Optional[Any] = [0] * dimension
__lowerCamelCase : Union[str, Any] = 1
return Vector(UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : Vector , UpperCAmelCase : Vector ):
"""simple docstring"""
assert (
isinstance(UpperCAmelCase , UpperCAmelCase )
and isinstance(UpperCAmelCase , UpperCAmelCase )
and (isinstance(UpperCAmelCase , (int, float) ))
)
return x * scalar + y
def _UpperCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
"""simple docstring"""
random.seed(UpperCAmelCase )
__lowerCamelCase : str = [random.randint(UpperCAmelCase , UpperCAmelCase ) for _ in range(UpperCAmelCase )]
return Vector(UpperCAmelCase )
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCamelCase : list[list[float]] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
__lowerCamelCase : str = matrix
__lowerCamelCase : Optional[int] = w
__lowerCamelCase : List[Any] = h
def __str__( self : Any ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = """"""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
def __add__( self : Any , _lowerCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__lowerCamelCase : int = []
for i in range(self.__height ):
__lowerCamelCase : str = [
self.__matrix[i][j] + other.component(_lowerCamelCase , _lowerCamelCase )
for j in range(self.__width )
]
matrix.append(_lowerCamelCase )
return Matrix(_lowerCamelCase , self.__width , self.__height )
else:
raise Exception("""matrix must have the same dimension!""" )
def __sub__( self : str , _lowerCamelCase : Matrix ):
'''simple docstring'''
if self.__width == other.width() and self.__height == other.height():
__lowerCamelCase : List[Any] = []
for i in range(self.__height ):
__lowerCamelCase : List[Any] = [
self.__matrix[i][j] - other.component(_lowerCamelCase , _lowerCamelCase )
for j in range(self.__width )
]
matrix.append(_lowerCamelCase )
return Matrix(_lowerCamelCase , self.__width , self.__height )
else:
raise Exception("""matrices must have the same dimension!""" )
@overload
def __mul__( self : List[Any] , _lowerCamelCase : float ):
'''simple docstring'''
...
@overload
def __mul__( self : Optional[int] , _lowerCamelCase : Vector ):
'''simple docstring'''
...
def __mul__( self : Optional[Any] , _lowerCamelCase : float | Vector ):
'''simple docstring'''
if isinstance(_lowerCamelCase , _lowerCamelCase ): # matrix-vector
if len(_lowerCamelCase ) == self.__width:
__lowerCamelCase : List[str] = zero_vector(self.__height )
for i in range(self.__height ):
__lowerCamelCase : Optional[int] = [
self.__matrix[i][j] * other.component(_lowerCamelCase )
for j in range(self.__width )
]
ans.change_component(_lowerCamelCase , sum(_lowerCamelCase ) )
return ans
else:
raise Exception(
"""vector must have the same size as the """
"""number of columns of the matrix!""" )
elif isinstance(_lowerCamelCase , (int, float) ): # matrix-scalar
__lowerCamelCase : Union[str, Any] = [
[self.__matrix[i][j] * other for j in range(self.__width )]
for i in range(self.__height )
]
return Matrix(_lowerCamelCase , self.__width , self.__height )
return None
def _snake_case ( self : Tuple ):
'''simple docstring'''
return self.__height
def _snake_case ( self : Any ):
'''simple docstring'''
return self.__width
def _snake_case ( self : Any , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
return self.__matrix[x][y]
else:
raise Exception("""change_component: indices out of bounds""" )
def _snake_case ( self : int , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : float ):
'''simple docstring'''
if 0 <= x < self.__height and 0 <= y < self.__width:
__lowerCamelCase : Optional[Any] = value
else:
raise Exception("""change_component: indices out of bounds""" )
def _snake_case ( self : Optional[Any] , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
__lowerCamelCase : Union[str, Any] = self.__matrix[:x] + self.__matrix[x + 1 :]
for i in range(len(_lowerCamelCase ) ):
__lowerCamelCase : List[str] = minor[i][:y] + minor[i][y + 1 :]
return Matrix(_lowerCamelCase , self.__width - 1 , self.__height - 1 ).determinant()
def _snake_case ( self : Tuple , _lowerCamelCase : int , _lowerCamelCase : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if 0 <= x < self.__height and 0 <= y < self.__width:
return (-1) ** (x + y) * self.minor(_lowerCamelCase , _lowerCamelCase )
else:
raise Exception("""Indices out of bounds""" )
def _snake_case ( self : int ):
'''simple docstring'''
if self.__height != self.__width:
raise Exception("""Matrix is not square""" )
if self.__height < 1:
raise Exception("""Matrix has no element""" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
__lowerCamelCase : int = [
self.__matrix[0][y] * self.cofactor(0 , _lowerCamelCase ) for y in range(self.__width )
]
return sum(_lowerCamelCase )
def _UpperCAmelCase ( UpperCAmelCase : int ):
"""simple docstring"""
__lowerCamelCase : list[list[float]] = [[0] * n for _ in range(UpperCAmelCase )]
return Matrix(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def _UpperCAmelCase ( UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : int ):
"""simple docstring"""
random.seed(UpperCAmelCase )
__lowerCamelCase : list[list[float]] = [
[random.randint(UpperCAmelCase , UpperCAmelCase ) for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )
]
return Matrix(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
| 519
| 1
|
from __future__ import annotations
class Node:
    def __init__(self, data):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree):  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree):
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree):
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main():  # Main function for testing.
    # Build a small example tree from the nine nodes (the exact shape is illustrative).
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print('Tree is: ')
    display(tree)
if __name__ == "__main__":
main()
| 718
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""google/realm-cc-news-pretrained-embedder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-encoder""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-scorer""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json"""
),
"""google/realm-cc-news-pretrained-openqa""": (
"""https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/config.json"""
),
"""google/realm-orqa-nq-openqa""": """https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json""",
"""google/realm-orqa-nq-reader""": """https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json""",
"""google/realm-orqa-wq-openqa""": """https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json""",
"""google/realm-orqa-wq-reader""": """https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json""",
# See all REALM models at https://huggingface.co/models?filter=realm
}
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
lowerCamelCase__ = """realm"""
def __init__( self , lowercase=30522 , lowercase=768 , lowercase=128 , lowercase=12 , lowercase=12 , lowercase=8 , lowercase=3072 , lowercase="gelu_new" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=256 , lowercase=10 , lowercase=1E-3 , lowercase=5 , lowercase=320 , lowercase=13353718 , lowercase=5000 , lowercase=1 , lowercase=0 , lowercase=2 , **lowercase , ):
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
# Common config
_lowerCamelCase : str = vocab_size
_lowerCamelCase : Dict = max_position_embeddings
_lowerCamelCase : int = hidden_size
_lowerCamelCase : Optional[Any] = retriever_proj_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Any = num_attention_heads
_lowerCamelCase : int = num_candidates
_lowerCamelCase : List[Any] = intermediate_size
_lowerCamelCase : int = hidden_act
_lowerCamelCase : Union[str, Any] = hidden_dropout_prob
_lowerCamelCase : Dict = attention_probs_dropout_prob
_lowerCamelCase : Union[str, Any] = initializer_range
_lowerCamelCase : List[Any] = type_vocab_size
_lowerCamelCase : int = layer_norm_eps
# Reader config
_lowerCamelCase : Tuple = span_hidden_size
_lowerCamelCase : int = max_span_width
_lowerCamelCase : Tuple = reader_layer_norm_eps
_lowerCamelCase : Union[str, Any] = reader_beam_size
_lowerCamelCase : Union[str, Any] = reader_seq_len
# Retrieval config
_lowerCamelCase : Optional[Any] = num_block_records
_lowerCamelCase : str = searcher_beam_size
| 492
| 0
|
class lowerCamelCase__ :
'''simple docstring'''
def __init__( self :Union[str, Any] , a :str = "" , a :bool = False ) -> None:
# Mapping from the first character of the prefix of the node
__UpperCamelCase : dict[str, RadixNode] = {}
# A node will be a leaf if the tree contains its word
__UpperCamelCase : Tuple = is_leaf
__UpperCamelCase : Tuple = prefix
def _lowerCamelCase ( self :Tuple , a :str ) -> tuple[str, str, str]:
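        # Return the longest common prefix of this node's prefix and the word, plus the two remaining suffixes.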
__UpperCamelCase : Dict = 0
for q, w in zip(self.prefix , a ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def _lowerCamelCase ( self :Union[str, Any] , a :list[str] ) -> None:
for word in words:
self.insert(a )
def _lowerCamelCase ( self :List[Any] , a :str ) -> None:
# Case 1: If the word is the prefix of the node
# Solution: We set the current node as leaf
if self.prefix == word:
__UpperCamelCase : Dict = True
# Case 2: The node has no edges that have a prefix to the word
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
__UpperCamelCase : Any = RadixNode(prefix=a , is_leaf=a )
else:
__UpperCamelCase : str = self.nodes[word[0]]
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[Any] = incoming_node.match(
a )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(a )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
__UpperCamelCase : Union[str, Any] = remaining_prefix
__UpperCamelCase : Union[str, Any] = self.nodes[matching_string[0]]
__UpperCamelCase : List[Any] = RadixNode(a , a )
__UpperCamelCase : Optional[int] = aux_node
if remaining_word == "":
__UpperCamelCase : Optional[Any] = True
else:
self.nodes[matching_string[0]].insert(a )
def _lowerCamelCase ( self :Tuple , a :str ) -> bool:
__UpperCamelCase : Union[str, Any] = self.nodes.get(word[0] , a )
if not incoming_node:
return False
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : List[str] = incoming_node.match(
a )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(a )
def _lowerCamelCase ( self :int , a :str ) -> bool:
__UpperCamelCase : Optional[Any] = self.nodes.get(word[0] , a )
if not incoming_node:
return False
else:
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase : Any = incoming_node.match(
a )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(a )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
__UpperCamelCase : str = list(self.nodes.values() )[0]
__UpperCamelCase : Union[str, Any] = merging_node.is_leaf
self.prefix += merging_node.prefix
__UpperCamelCase : Dict = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
__UpperCamelCase : int = False
# If there is 1 edge, we merge it with its child
else:
__UpperCamelCase : Tuple = list(incoming_node.nodes.values() )[0]
__UpperCamelCase : List[str] = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
__UpperCamelCase : Union[str, Any] = merging_node.nodes
return True
def _lowerCamelCase ( self :Tuple , a :int = 0 ) -> None:
if self.prefix != "":
print("-" * height , self.prefix , " (leaf)" if self.is_leaf else "" )
for value in self.nodes.values():
value.print_tree(height + 1 )
def _SCREAMING_SNAKE_CASE ( ) -> bool:
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = "banana bananas bandana band apple all beast".split()
__UpperCamelCase : Tuple = RadixNode()
root.insert_many(_lowerCamelCase)
assert all(root.find(_lowerCamelCase) for word in words)
assert not root.find("bandanas")
assert not root.find("apps")
root.delete("all")
assert not root.find("all")
root.delete("banana")
assert not root.find("banana")
assert root.find("bananas")
return True
def _SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
assert test_trie()
def _SCREAMING_SNAKE_CASE ( ) -> None:
'''simple docstring'''
__UpperCamelCase : List[Any] = RadixNode()
__UpperCamelCase : List[Any] = "banana bananas bandanas bandana band apple all beast".split()
root.insert_many(_lowerCamelCase)
print("Words:" , _lowerCamelCase)
print("Tree:")
root.print_tree()
if __name__ == "__main__":
main()
| 557
|
from queue import Queue
from typing import TYPE_CHECKING, Optional
if TYPE_CHECKING:
from ..models.auto import AutoTokenizer
class BaseStreamer:
    """Base class from which `.generate()` streamers should inherit."""

    def put(self, value):
        """Called by `.generate()` to push new tokens."""
        raise NotImplementedError()

    def end(self):
        """Called by `.generate()` to signal the end of generation."""
        raise NotImplementedError()


class TextStreamer(BaseStreamer):
    """Streamer that prints tokens to stdout as soon as they form entire words."""

    def __init__(self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, **decode_kwargs):
        self.tokenizer = tokenizer
        self.skip_prompt = skip_prompt
        self.decode_kwargs = decode_kwargs

        # variables used in the streaming process
        self.token_cache = []
        self.print_len = 0
        self.next_tokens_are_prompt = True
    def put(self, value):
        """Receives tokens, decodes them, and forwards the printable part as soon as it is final."""
        if len(value.shape) > 1 and value.shape[0] > 1:
            raise ValueError("TextStreamer only supports batch size 1")
        elif len(value.shape) > 1:
            value = value[0]

        if self.skip_prompt and self.next_tokens_are_prompt:
            self.next_tokens_are_prompt = False
            return

        # Add the new token to the cache and decodes the entire thing.
        self.token_cache.extend(value.tolist())
        text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)

        # After the symbol for a new line, we flush the cache.
        if text.endswith("\n"):
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        # If the last token is a CJK character, we print the characters.
        elif len(text) > 0 and self._is_chinese_char(ord(text[-1])):
            printable_text = text[self.print_len :]
            self.print_len += len(printable_text)
        # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words,
        # which may change with the subsequent token -- there are probably smarter ways to do this!)
        else:
            printable_text = text[self.print_len : text.rfind(" ") + 1]
            self.print_len += len(printable_text)

        self.on_finalized_text(printable_text)
    def end(self):
        """Flushes any remaining cache and signals the end of the stream."""
        # Flush the cache, if it exists
        if len(self.token_cache) > 0:
            text = self.tokenizer.decode(self.token_cache, **self.decode_kwargs)
            printable_text = text[self.print_len :]
            self.token_cache = []
            self.print_len = 0
        else:
            printable_text = ""

        self.next_tokens_are_prompt = True
        self.on_finalized_text(printable_text, stream_end=True)

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Prints the new text to stdout. If the stream is ending, also prints a newline."""
        print(text, flush=True, end="" if not stream_end else None)
    def _is_chinese_char(self, cp) -> bool:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0x4e_00 and cp <= 0x9f_ff)
or (cp >= 0x34_00 and cp <= 0x4d_bf) #
or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) #
or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) #
or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) #
or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) #
or (cp >= 0xf9_00 and cp <= 0xfa_ff)
or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) #
): #
return True
return False
class TextIteratorStreamer(TextStreamer):
    """Streamer that stores print-ready text in a queue, to be consumed by a downstream application as an iterator."""

    def __init__(
        self, tokenizer: "AutoTokenizer", skip_prompt: bool = False, timeout: Optional[float] = None, **decode_kwargs
    ):
        super().__init__(tokenizer, skip_prompt, **decode_kwargs)
        self.text_queue = Queue()
        self.stop_signal = None
        self.timeout = timeout

    def on_finalized_text(self, text: str, stream_end: bool = False):
        """Put the new text in the queue. If the stream is ending, also put a stop signal in the queue."""
        self.text_queue.put(text, timeout=self.timeout)
        if stream_end:
            self.text_queue.put(self.stop_signal, timeout=self.timeout)

    def __iter__(self):
        return self

    def __next__(self):
        value = self.text_queue.get(timeout=self.timeout)
        if value == self.stop_signal:
            raise StopIteration()
        else:
            return value
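

# A minimal usage sketch (not part of the original module). It assumes a causal LM and its
# tokenizer are supplied by the caller; `generate` runs in a background thread while the
# streamer is consumed as a plain iterator in the main thread.
def _stream_generation_demo(model, tokenizer, prompt: str) -> str:
    from threading import Thread

    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, timeout=60.0)
    inputs = tokenizer(prompt, return_tensors="pt")
    thread = Thread(target=model.generate, kwargs={**inputs, "streamer": streamer, "max_new_tokens": 20})
    thread.start()

    generated_text = ""
    for new_text in streamer:  # blocks until the next decoded chunk (or the stop signal) arrives
        generated_text += new_text
    thread.join()
    return generated_text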
| 557
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Optional[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def snake_case ( self : int ):
lowercase__ : Union[str, Any] = 1
lowercase__ : List[str] = 3
lowercase__ : Dict = (32, 32)
lowercase__ : List[str] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE )
return image
@property
def snake_case ( self : int ):
torch.manual_seed(0 )
lowercase__ : Any = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
return model
@property
def snake_case ( self : str ):
torch.manual_seed(0 )
lowercase__ : str = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def snake_case ( self : Tuple ):
torch.manual_seed(0 )
lowercase__ : List[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5_006 , )
return RobertaSeriesModelWithTransformation(SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : Any ):
def extract(*SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : List[Any] ):
class snake_case__:
"""simple docstring"""
def __init__( self : Tuple ):
lowercase__ : List[str] = torch.ones([0] )
def snake_case ( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
self.pixel_values.to(SCREAMING_SNAKE_CASE )
return self
return Out()
return extract
def snake_case ( self : Optional[int] ):
lowercase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase__ : List[str] = self.dummy_cond_unet
lowercase__ : str = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE )
lowercase__ : int = self.dummy_vae
lowercase__ : Optional[int] = self.dummy_text_encoder
lowercase__ : Union[str, Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase__ : List[str] = 77
lowercase__ : int = self.dummy_image.to(SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase__ : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
lowercase__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=SCREAMING_SNAKE_CASE )
lowercase__ : int = alt_pipe.to(SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = "A painting of a squirrel eating a burger"
lowercase__ : List[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
lowercase__ : Any = alt_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=SCREAMING_SNAKE_CASE , )
lowercase__ : Union[str, Any] = output.images
lowercase__ : List[str] = torch.Generator(device=SCREAMING_SNAKE_CASE ).manual_seed(0 )
lowercase__ : int = alt_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , image=SCREAMING_SNAKE_CASE , return_dict=SCREAMING_SNAKE_CASE , )[0]
lowercase__ : Optional[Any] = image[0, -3:, -3:, -1]
lowercase__ : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase__ : Dict = np.array([0.4_427, 0.3_731, 0.4_249, 0.4_941, 0.4_546, 0.4_148, 0.4_193, 0.4_666, 0.4_499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case ( self : Tuple ):
lowercase__ : Tuple = self.dummy_cond_unet
lowercase__ : Tuple = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = self.dummy_vae
lowercase__ : List[str] = self.dummy_text_encoder
lowercase__ : List[Any] = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta" )
lowercase__ : int = 77
lowercase__ : Tuple = self.dummy_image.to(SCREAMING_SNAKE_CASE )
# put models in fp16
lowercase__ : List[str] = unet.half()
lowercase__ : Union[str, Any] = vae.half()
lowercase__ : List[str] = bert.half()
# make sure here that pndm scheduler skips prk
lowercase__ : List[str] = AltDiffusionImgaImgPipeline(
unet=SCREAMING_SNAKE_CASE , scheduler=SCREAMING_SNAKE_CASE , vae=SCREAMING_SNAKE_CASE , text_encoder=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , feature_extractor=self.dummy_extractor , )
lowercase__ : Optional[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=SCREAMING_SNAKE_CASE )
lowercase__ : int = alt_pipe.to(SCREAMING_SNAKE_CASE )
alt_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "A painting of a squirrel eating a burger"
lowercase__ : str = torch.manual_seed(0 )
lowercase__ : Union[str, Any] = alt_pipe(
[prompt] , generator=SCREAMING_SNAKE_CASE , num_inference_steps=2 , output_type="np" , image=SCREAMING_SNAKE_CASE , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def snake_case ( self : List[Any] ):
lowercase__ : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase__ : Any = init_image.resize((760, 504) )
lowercase__ : List[Any] = "BAAI/AltDiffusion"
lowercase__ : Dict = AltDiffusionImgaImgPipeline.from_pretrained(
SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : List[Any] = "A fantasy landscape, trending on artstation"
lowercase__ : Any = torch.manual_seed(0 )
lowercase__ : int = pipe(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE , output_type="np" , )
lowercase__ : Optional[Any] = output.images[0]
lowercase__ : int = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase__ : Any = np.array([0.9_358, 0.9_397, 0.9_599, 0.9_901, 1.0_000, 1.0_000, 0.9_882, 1.0_000, 1.0_000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case ( self : Optional[Any] ):
lowercase__ : Dict = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
lowercase__ : Tuple = init_image.resize((768, 512) )
lowercase__ : Any = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy" )
lowercase__ : Any = "BAAI/AltDiffusion"
lowercase__ : Optional[int] = AltDiffusionImgaImgPipeline.from_pretrained(
SCREAMING_SNAKE_CASE , safety_checker=SCREAMING_SNAKE_CASE , )
pipe.to(SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE )
pipe.enable_attention_slicing()
lowercase__ : Optional[int] = "A fantasy landscape, trending on artstation"
lowercase__ : List[str] = torch.manual_seed(0 )
lowercase__ : Tuple = pipe(
prompt=SCREAMING_SNAKE_CASE , image=SCREAMING_SNAKE_CASE , strength=0.75 , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE , output_type="np" , )
lowercase__ : List[Any] = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
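

# A hedged usage sketch mirroring what the slow tests above exercise; not part of the test
# module. It assumes the public diffusers class `AltDiffusionImg2ImgPipeline` and the
# "BAAI/AltDiffusion" checkpoint; the argument values are illustrative only.
def _alt_diffusion_img2img_demo(init_image):
    from diffusers import AltDiffusionImg2ImgPipeline

    pipe = AltDiffusionImg2ImgPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
    pipe = pipe.to("cuda")
    pipe.enable_attention_slicing()

    generator = torch.manual_seed(0)
    output = pipe(
        prompt="A fantasy landscape, trending on artstation",
        image=init_image,  # a PIL.Image whose sides are multiples of 8
        strength=0.75,
        guidance_scale=7.5,
        generator=generator,
        output_type="np",
    )
    return output.images[0]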
| 81
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_mgp_str''': ['''MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MgpstrConfig'''],
'''processing_mgp_str''': ['''MgpstrProcessor'''],
'''tokenization_mgp_str''': ['''MgpstrTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
'''MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MgpstrModel''',
'''MgpstrPreTrainedModel''',
'''MgpstrForSceneTextRecognition''',
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
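
# Illustrative note (a sketch, assuming this init file lives under transformers/models/mgp_str/):
# with the `_LazyModule` registered above, importing a light name only touches
# `_import_structure`, while torch-backed names trigger the heavy submodule import on first access:
#
#     from transformers.models.mgp_str import MgpstrConfig                   # cheap, config only
#     from transformers.models.mgp_str import MgpstrForSceneTextRecognition  # imports modeling_mgp_str (torch)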
| 81
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class UpperCAmelCase :
def __init__( self : List[str] , __lowerCamelCase : Any , __lowerCamelCase : Tuple=1_3 , __lowerCamelCase : str=6_4 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : Tuple=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : Optional[Any]=3_2 , __lowerCamelCase : Tuple=5 , __lowerCamelCase : List[str]=4 , __lowerCamelCase : List[Any]=3_7 , __lowerCamelCase : str="gelu" , __lowerCamelCase : Optional[Any]=0.1 , __lowerCamelCase : str=0.1 , __lowerCamelCase : Tuple=1_0 , __lowerCamelCase : List[str]=0.0_2 , __lowerCamelCase : int=[1, 1_6, 4, 4] , __lowerCamelCase : Optional[int]=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = image_size
_snake_case = patch_size
_snake_case = num_channels
_snake_case = is_training
_snake_case = use_labels
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = scope
_snake_case = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
_snake_case = (self.image_size // 3_2) ** 2
_snake_case = num_patches + 1
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
_snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = self.get_config()
return config, pixel_values, labels
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
_snake_case = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [4, 8, 1_6, 3_2],
'''num_groups''': 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=__lowerCamelCase , )
def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] ):
"""simple docstring"""
_snake_case = ViTHybridModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : int , __lowerCamelCase : Optional[int] , __lowerCamelCase : Union[str, Any] ):
"""simple docstring"""
_snake_case = self.type_sequence_label_size
_snake_case = ViTHybridForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
_snake_case = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
_snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE,unittest.TestCase ):
A__ : List[str] = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
A__ : Optional[int] = (
{'''feature-extraction''': ViTHybridModel, '''image-classification''': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
A__ : Dict = False
A__ : List[Any] = False
A__ : Optional[int] = False
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case = ViTHybridModelTester(self )
_snake_case = ConfigTester(self , config_class=__lowerCamelCase , has_text_modality=__lowerCamelCase , hidden_size=3_7 )
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
pass
def __UpperCAmelCase ( self : Tuple ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase , nn.Linear ) )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_snake_case = model_class(__lowerCamelCase )
_snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_snake_case = [*signature.parameters.keys()]
_snake_case = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
def __UpperCAmelCase ( self : List[Any] ):
"""simple docstring"""
_snake_case , _snake_case = self.model_tester.prepare_config_and_inputs_for_common()
_snake_case = _config_zero_init(__lowerCamelCase )
for model_class in self.all_model_classes:
_snake_case = model_class(config=__lowerCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
_snake_case = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def __UpperCAmelCase ( self : List[str] ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = ViTHybridModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase ( unittest.TestCase ):
@cached_property
def __UpperCAmelCase ( self : str ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __UpperCAmelCase ( self : Dict ):
"""simple docstring"""
_snake_case = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__lowerCamelCase )
_snake_case = self.default_image_processor
_snake_case = prepare_img()
_snake_case = image_processor(images=__lowerCamelCase , return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
_snake_case = model(**__lowerCamelCase )
# verify the logits
_snake_case = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
_snake_case = torch.tensor([-1.9_0_9_0, -0.4_9_9_3, -0.2_3_8_9] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = ViTHybridImageProcessor.from_pretrained('''google/vit-hybrid-base-bit-384''' )
_snake_case = ViTHybridForImageClassification.from_pretrained('''google/vit-hybrid-base-bit-384''' , device_map='''auto''' )
_snake_case = prepare_img()
_snake_case = image_processor(images=__lowerCamelCase , return_tensors='''pt''' )
_snake_case = model(**__lowerCamelCase )
_snake_case = outputs.logits
# model predicts one of the 1000 ImageNet classes
_snake_case = logits.argmax(-1 ).item()
self.assertTrue(model.config.idalabel[predicted_class_idx] , '''tabby, tabby cat''' )
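

# A minimal inference sketch of the path the integration tests above follow; not part of
# the test module. It assumes the public "google/vit-hybrid-base-bit-384" checkpoint.
def _vit_hybrid_predict(image):
    processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
    model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # the model predicts one of the 1000 ImageNet classes
    return model.config.id2label[logits.argmax(-1).item()]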
| 103
|
'''simple docstring'''
def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
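

# A quick self-check sketch (not in the original script): circle sort should agree with
# Python's built-in sorted() on arbitrary inputs.
def _circle_sort_self_check(trials: int = 100) -> None:
    import random

    for _ in range(trials):
        data = [random.randint(-100, 100) for _ in range(random.randint(0, 20))]
        assert circle_sort(list(data)) == sorted(data)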
if __name__ == "__main__":
UpperCamelCase__ : Optional[int] = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ : List[Any] = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 578
| 0
|
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase__ ( A__ : str=2 , A__ : Any=3 , A__ : int=16 , A__ : int = 10 , A__ : int = 2 ):
'''simple docstring'''
def get_dataset(A__ : List[Any] ):
__lowerCamelCase = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(_UpperCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__lowerCamelCase = get_dataset(_UpperCamelCase )
__lowerCamelCase = get_dataset(_UpperCamelCase )
__lowerCamelCase = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
__lowerCamelCase = DataLoader(_UpperCamelCase , shuffle=_UpperCamelCase , batch_size=_UpperCamelCase , num_workers=4 )
return (train_dataloader, valid_dataloader)
def lowerCamelCase__ ( A__ : str , A__ : Dict , A__ : Optional[int] , A__ : Optional[int] , A__ : str , A__ : Tuple=None ):
'''simple docstring'''
__lowerCamelCase = []
for epoch in range(_UpperCamelCase ):
# Train quickly
model.train()
for batch in dataloader:
__lowerCamelCase, __lowerCamelCase = batch
__lowerCamelCase = model(_UpperCamelCase )
__lowerCamelCase = torch.nn.functional.mse_loss(_UpperCamelCase , _UpperCamelCase )
accelerator.backward(_UpperCamelCase )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class lowerCamelCase__( nn.Module):
def __init__( self: List[str] ):
super().__init__()
__lowerCamelCase = nn.Parameter(torch.randn(1 ) )
__lowerCamelCase = nn.Parameter(torch.randn(1 ) )
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Any ):
return x * self.a + self.b
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: Optional[int] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(total_limit=1 , project_dir=UpperCamelCase_ , automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 )
def lowerCAmelCase__ ( self: Tuple ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
# Train baseline
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
__lowerCamelCase = os.path.join(UpperCamelCase_ , """initial""" )
accelerator.save_state(UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
__lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = Accelerator()
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
__lowerCamelCase = os.path.join(UpperCamelCase_ , """checkpoint""" )
accelerator.save_state(UpperCamelCase_ )
# Load everything back in and make sure all states work
accelerator.load_state(UpperCamelCase_ )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
__lowerCamelCase = train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
# Train partially
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=UpperCamelCase_ )
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = train(2 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_1""" ) )
test_rands += train(1 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
((__lowerCamelCase), (__lowerCamelCase)) = model.a.item(), model.b.item()
__lowerCamelCase = optimizer.state_dict()
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = torch.tensor([1, 2, 3] )
__lowerCamelCase = torch.tensor([2, 3, 4] )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(net.parameters() )
__lowerCamelCase = Accelerator()
with self.assertRaises(UpperCamelCase_ ) as ve:
accelerator.register_for_checkpointing(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
__lowerCamelCase = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def lowerCAmelCase__ ( self: Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = torch.optim.Adam(params=model.parameters() , lr=1E-3 )
__lowerCamelCase = torch.optim.lr_scheduler.StepLR(UpperCamelCase_ , step_size=1 , gamma=0.99 )
__lowerCamelCase, __lowerCamelCase = dummy_dataloaders()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase = accelerator.prepare(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
# Save initial
accelerator.save_state()
__lowerCamelCase = scheduler.state_dict()
train(3 , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
self.assertNotEqual(UpperCamelCase_ , scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) )
self.assertEqual(UpperCamelCase_ , scheduler.state_dict() )
def lowerCAmelCase__ ( self: str ):
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
__lowerCamelCase = DummyModel()
__lowerCamelCase = ProjectConfiguration(automatic_checkpoint_naming=UpperCamelCase_ , total_limit=2 )
# Train baseline
__lowerCamelCase = Accelerator(project_dir=UpperCamelCase_ , project_config=UpperCamelCase_ )
__lowerCamelCase = accelerator.prepare(UpperCamelCase_ )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(UpperCamelCase_ , """checkpoints""" , """checkpoint_10""" ) ) )
@require_cuda
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = ["""torchrun""", F'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(UpperCamelCase_ , env=os.environ.copy() )
if __name__ == "__main__":
UpperCAmelCase_ = '/tmp/accelerate/state_checkpointing'
UpperCAmelCase_ = DummyModel()
UpperCAmelCase_ = torch.optim.Adam(params=model.parameters(), lr=1E-3)
UpperCAmelCase_ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCAmelCase_ , UpperCAmelCase_ = dummy_dataloaders()
UpperCAmelCase_ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCAmelCase_ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCAmelCase_ , UpperCAmelCase_ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
# Check that the intial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
UpperCAmelCase_ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
UpperCAmelCase_ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
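

# A condensed sketch of the save/restore flow the tests above exercise; not part of the test
# module. It assumes `automatic_checkpoint_naming`, under which `save_state()` writes to
# {project_dir}/checkpoints/checkpoint_N and `load_state(path)` restores the model, optimizer,
# and RNG state that were registered through `accelerator.prepare`.
def _checkpointing_demo(model: nn.Module, project_dir: str) -> None:
    project_config = ProjectConfiguration(project_dir=project_dir, automatic_checkpoint_naming=True)
    accelerator = Accelerator(project_config=project_config)
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    model, optimizer = accelerator.prepare(model, optimizer)

    accelerator.save_state()  # -> {project_dir}/checkpoints/checkpoint_0
    # ... training steps would go here ...
    accelerator.load_state(os.path.join(project_dir, "checkpoints", "checkpoint_0"))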
| 715
|
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class lowerCamelCase__:
def __init__( self: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[Any]=14 , UpperCamelCase_: int=7 , UpperCamelCase_: Union[str, Any]=True , UpperCamelCase_: Dict=True , UpperCamelCase_: Optional[Any]=True , UpperCamelCase_: Tuple=True , UpperCamelCase_: List[str]=True , UpperCamelCase_: int=99 , UpperCamelCase_: str=32 , UpperCamelCase_: List[Any]=5 , UpperCamelCase_: Optional[int]=4 , UpperCamelCase_: List[Any]=37 , UpperCamelCase_: Optional[int]="gelu" , UpperCamelCase_: Tuple=0.1 , UpperCamelCase_: Optional[Any]=0.1 , UpperCamelCase_: List[str]=5_12 , UpperCamelCase_: Dict=16 , UpperCamelCase_: List[str]=2 , UpperCamelCase_: Optional[Any]=0.02 , UpperCamelCase_: List[str]=3 , UpperCamelCase_: Tuple=4 , UpperCamelCase_: Tuple=None , ):
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_input_mask
__lowerCamelCase = use_labels
__lowerCamelCase = use_mc_token_ids
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_input_mask:
__lowerCamelCase = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
if self.use_mc_token_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.num_choices] , self.seq_length )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = self.get_config()
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def lowerCAmelCase__ ( self: Dict ):
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: str , UpperCamelCase_: Dict , UpperCamelCase_: Tuple , UpperCamelCase_: Any , UpperCamelCase_: List[str] , *UpperCamelCase_: Optional[Any] ):
__lowerCamelCase = CTRLModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ )
model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
__lowerCamelCase = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(len(result.past_key_values ) , config.n_layer )
def lowerCAmelCase__ ( self: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: int , UpperCamelCase_: List[Any] , *UpperCamelCase_: Tuple ):
__lowerCamelCase = CTRLLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
), (
__lowerCamelCase
),
) = config_and_inputs
__lowerCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """head_mask""": head_mask}
return config, inputs_dict
def lowerCAmelCase__ ( self: List[Any] , UpperCamelCase_: Dict , UpperCamelCase_: Union[str, Any] , UpperCamelCase_: str , UpperCamelCase_: Union[str, Any] , *UpperCamelCase_: Union[str, Any] ):
__lowerCamelCase = self.num_labels
__lowerCamelCase = CTRLForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
@require_torch
class lowerCamelCase__( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Any = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
UpperCAmelCase__ : Optional[Any] = (CTRLLMHeadModel,) if is_torch_available() else ()
UpperCAmelCase__ : int = (
{
'feature-extraction': CTRLModel,
'text-classification': CTRLForSequenceClassification,
'text-generation': CTRLLMHeadModel,
'zero-shot': CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCAmelCase__ : List[str] = True
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Optional[Any] = False
def lowerCAmelCase__ ( self: Any , UpperCamelCase_: Any , UpperCamelCase_: List[str] , UpperCamelCase_: Tuple , UpperCamelCase_: Tuple , UpperCamelCase_: List[str] ):
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = CTRLModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=UpperCamelCase_ , n_embd=37 )
def lowerCAmelCase__ ( self: Optional[int] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Optional[Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*UpperCamelCase_ )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def lowerCAmelCase__ ( self: List[Any] ):
pass
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = CTRLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def lowerCAmelCase__ ( self: Optional[Any] ):
pass
@require_torch
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[str] ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = CTRLLMHeadModel.from_pretrained("""ctrl""" )
model.to(UpperCamelCase_ )
__lowerCamelCase = torch.tensor(
[[1_18_59, 0, 16_11, 8]] , dtype=torch.long , device=UpperCamelCase_ ) # Legal the president is
__lowerCamelCase = [
1_18_59,
0,
16_11,
8,
5,
1_50,
2_64_49,
2,
19,
3_48,
4_69,
3,
25_95,
48,
2_07_40,
24_65_33,
24_65_33,
19,
30,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowerCamelCase = model.generate(UpperCamelCase_ , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , UpperCamelCase_ )
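

# A condensed sketch of the greedy-generation path checked by the integration test above;
# not part of the test module. It assumes the public "ctrl" checkpoint.
def _ctrl_generation_demo() -> list:
    model = CTRLLMHeadModel.from_pretrained("ctrl")
    input_ids = torch.tensor([[11859, 0, 1611, 8]], dtype=torch.long)  # "Legal the president is"
    output_ids = model.generate(input_ids, do_sample=False)  # greedy decoding, as in the test
    return output_ids[0].tolist()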
| 80
| 0
|
def max_product(numbers: list[int]) -> int:
    """Returns the maximum product obtainable from a contiguous subarray of the given integers."""
    if not numbers:
        return 0

    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")

    max_till_now = min_till_now = max_prod = numbers[0]

    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)

        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)

    return max_prod
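

# Worked examples (a sketch, not in the original file): the running min/max swap on a
# negative number is what lets two negatives combine into a large positive product.
if __name__ == "__main__":
    assert max_product([2, 3, -2, 4]) == 6  # best contiguous slice is [2, 3]
    assert max_product([-2, 0, -1]) == 0  # a lone 0 beats every negative product
    assert max_product([-4, -3, -2]) == 12  # [-4, -3] -> 12, the two negatives cancel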
| 445
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 445
| 1
|
"""simple docstring"""
from ....utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration wrapper for a multimodal model: copies the attributes of the wrapped
    text config and adds the multimodal fields on top."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 194
|
"""simple docstring"""
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key(orig_key):
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict
def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--pytorch_model_path', default=None, type=str, required=True, help='Path to YOSO pytorch checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The json file for YOSO model config.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__UpperCAmelCase = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 194
| 1
|
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
_UpperCAmelCase : int = logging.getLogger(__name__)
class lowercase_ :
"""simple docstring"""
def __init__( self : Tuple ) -> int:
_A = False
def __UpperCAmelCase ( self : Optional[int], UpperCamelCase__ : str, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : int, UpperCamelCase__ : Optional[Any] ) -> Any:
if not self.initialized:
_A = RagRetriever(
UpperCamelCase__, question_encoder_tokenizer=UpperCamelCase__, generator_tokenizer=UpperCamelCase__, index=UpperCamelCase__, init_retrieval=UpperCamelCase__, )
_A = True
def __UpperCAmelCase ( self : Any ) -> List[Any]:
self.retriever.index.init_index()
def __UpperCAmelCase ( self : int, UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Dict ) -> Optional[Any]:
_A , _A = self.retriever._main_retrieve(UpperCamelCase__, UpperCamelCase__ )
return doc_ids, retrieved_doc_embeds
class lowercase_ ( _UpperCamelCase ):
"""simple docstring"""
def __init__( self : Optional[Any], UpperCamelCase__ : Optional[Any], UpperCamelCase__ : Optional[int], UpperCamelCase__ : Dict, UpperCamelCase__ : Union[str, Any], UpperCamelCase__ : int=None ) -> Optional[int]:
if index is not None and index.is_initialized() and len(UpperCamelCase__ ) > 0:
raise ValueError(
'When using Ray for distributed fine-tuning, '
'you\'ll need to provide the paths instead, '
'as the dataset and the index are loaded '
'separately. More info in examples/rag/use_own_knowledge_dataset.py ' )
super().__init__(
UpperCamelCase__, question_encoder_tokenizer=UpperCamelCase__, generator_tokenizer=UpperCamelCase__, index=UpperCamelCase__, init_retrieval=UpperCamelCase__, )
_A = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
for worker in self.retrieval_workers
] )
def __UpperCAmelCase ( self : Dict ) -> int:
logger.info('initializing retrieval' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def __UpperCAmelCase ( self : Optional[Any], UpperCamelCase__ : Dict, UpperCamelCase__ : Tuple ) -> Dict:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
_A = self.retrieval_workers[random.randint(0, len(self.retrieval_workers ) - 1 )]
_A , _A = ray.get(random_worker.retrieve.remote(UpperCamelCase__, UpperCamelCase__ ) )
else:
_A , _A = self._main_retrieve(UpperCamelCase__, UpperCamelCase__ )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(UpperCamelCase__ )
@classmethod
def __UpperCAmelCase ( cls : List[str], UpperCamelCase__ : List[Any], UpperCamelCase__ : List[Any]=None, **UpperCamelCase__ : Optional[int] ) -> Optional[Any]:
return super(UpperCamelCase__, cls ).get_tokenizers(UpperCamelCase__, UpperCamelCase__, **UpperCamelCase__ )
@classmethod
def __UpperCAmelCase ( cls : Any, UpperCamelCase__ : Tuple, UpperCamelCase__ : int, UpperCamelCase__ : List[Any]=None, **UpperCamelCase__ : List[str] ) -> Tuple:
_A = kwargs.pop('config', UpperCamelCase__ ) or RagConfig.from_pretrained(UpperCamelCase__, **UpperCamelCase__ )
_A = RagTokenizer.from_pretrained(UpperCamelCase__, config=UpperCamelCase__ )
_A = rag_tokenizer.question_encoder
_A = rag_tokenizer.generator
if indexed_dataset is not None:
_A = 'custom'
_A = CustomHFIndex(config.retrieval_vector_size, UpperCamelCase__ )
else:
_A = cls._build_index(UpperCamelCase__ )
return cls(
UpperCamelCase__, question_encoder_tokenizer=UpperCamelCase__, generator_tokenizer=UpperCamelCase__, retrieval_workers=UpperCamelCase__, index=UpperCamelCase__, )
| 107
|
import colorsys
from PIL import Image # type: ignore
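# Renders the Mandelbrot set: each pixel is mapped to a point c in the complex plane,
# iterated as z -> z*z + c, and colored by how quickly the orbit escapes.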
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 800, image_height: int = 600, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 592
| 0
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
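# Jacobi iterative method: for a strictly diagonally dominant system, each iteration solves
# every row for its diagonal unknown using the previous iteration's values for the others.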
def jacobi_iteration_method(coefficient_matrix: NDArray[float64], constant_matrix: NDArray[float64], init_val: list[int], iterations: int) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg)
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg)
    if rows1 != rows2:
        msg = (
            'Coefficient and constant matrices dimensions must be nxn and nx1 but '
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg)
    if len(init_val) != rows1:
        msg = (
            'Number of initial values must be equal to number of rows in coefficient '
            f"matrix but received {len(init_val)} and {rows1}"
        )
        raise ValueError(msg)
    if iterations <= 0:
        raise ValueError('Iterations must be at least 1' )
    table: NDArray[float64] = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table)
    # Iterates the whole matrix for given number of times
    for _ in range(iterations):
        new_val = []
        for row in range(rows):
            temp = 0
            for col in range(cols):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp)
        init_val = new_val
    return [float(i) for i in new_val]
def strictly_diagonally_dominant(table: NDArray[float64]) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0, rows):
        total = 0
        for j in range(0, cols - 1):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
    return is_diagonally_dominant
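# A minimal usage sketch (the values below are illustrative, not from the source):
#   coefficient = np.array([[4.0, 1.0], [1.0, 3.0]])
#   constant = np.array([[1.0], [2.0]])
#   jacobi_iteration_method(coefficient, constant, init_val=[0, 0], iterations=25)
# converges toward the exact solution x ~ [0.0909, 0.6364] of 4x + y = 1, x + 3y = 2.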
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 490
|
'''simple docstring'''
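# Judging from the recurrence below, this solves Project Euler problem 65: sum the digits of
# the numerator of the max_n-th convergent of the continued fraction for e.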
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 490
| 1
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def __snake_case ( SCREAMING_SNAKE_CASE__ : Any=None ) -> Optional[Any]:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(add_help=SCREAMING_SNAKE_CASE__ , allow_abbrev=SCREAMING_SNAKE_CASE__ )
# The main config parser
_UpperCAmelCase : Tuple = config_command_parser(SCREAMING_SNAKE_CASE__ )
# The subparser to add commands to
_UpperCAmelCase : str = config_parser.add_subparsers(title="subcommands" , dest="subcommand" )
# Then add other parsers with the parent parser
default_command_parser(SCREAMING_SNAKE_CASE__ , parents=[parent_parser] )
update_command_parser(SCREAMING_SNAKE_CASE__ , parents=[parent_parser] )
return config_parser
def __snake_case ( ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase : int = get_config_parser()
_UpperCAmelCase : str = config_parser.parse_args()
if not hasattr(SCREAMING_SNAKE_CASE__ , "func" ):
config_parser.print_help()
exit(1 )
# Run
args.func(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
main()
| 289
|
"""simple docstring"""
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
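# Smoke tests: each case builds PyTorchBenchmarkArguments for a tiny checkpoint, runs
# PyTorchBenchmark, and only asserts that the timing/memory result dicts are populated.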
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
def snake_case_ ( self : Union[str, Any] , A : Optional[Any] ):
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["bs"] , model_result["ss"] ):
_UpperCAmelCase : List[str] = model_result["result"][batch_size][sequence_length]
self.assertIsNotNone(A )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase : Any = "sshleifer/tiny-gpt2"
_UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[Any] = PyTorchBenchmark(A )
_UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : List[str] = "sgugger/tiny-distilbert-classification"
_UpperCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , only_pretrain_model=A , )
_UpperCAmelCase : Dict = PyTorchBenchmark(A )
_UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : int ):
_UpperCAmelCase : Any = "sshleifer/tiny-gpt2"
_UpperCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , torchscript=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : Dict = PyTorchBenchmark(A )
_UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Union[str, Any] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , fpaa=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[str] = PyTorchBenchmark(A )
_UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : int = AutoConfig.from_pretrained(A )
# set architectures equal to `None`
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[Any] = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : int ):
_UpperCAmelCase : Dict = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : Tuple = PyTorchBenchmark(A )
_UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == "cpu" , "Can't do half precision" )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Optional[int] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , fpaa=A , multi_process=A , )
_UpperCAmelCase : Tuple = PyTorchBenchmark(A )
_UpperCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self : List[Any] ):
_UpperCAmelCase : Optional[Any] = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(A )
_UpperCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : List[Any] = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : str ):
_UpperCAmelCase : List[str] = "sshleifer/tinier_bart"
_UpperCAmelCase : Any = AutoConfig.from_pretrained(A )
_UpperCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def snake_case_ ( self : Any ):
_UpperCAmelCase : Tuple = "sshleifer/tiny-gpt2"
_UpperCAmelCase : Tuple = AutoConfig.from_pretrained(A )
_UpperCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self : str ):
_UpperCAmelCase : Optional[int] = "sshleifer/tinier_bart"
_UpperCAmelCase : int = AutoConfig.from_pretrained(A )
_UpperCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=A , )
_UpperCAmelCase : str = PyTorchBenchmark(A , configs=[config] )
_UpperCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : int = "sshleifer/tiny-gpt2"
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , save_to_csv=A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(A , "inf_time.csv" ) , train_memory_csv_file=os.path.join(A , "train_mem.csv" ) , inference_memory_csv_file=os.path.join(A , "inf_mem.csv" ) , train_time_csv_file=os.path.join(A , "train_time.csv" ) , env_info_csv_file=os.path.join(A , "env.csv" ) , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A )
benchmark.run()
self.assertTrue(Path(os.path.join(A , "inf_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "train_time.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "inf_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "train_mem.csv" ) ).exists() )
self.assertTrue(Path(os.path.join(A , "env.csv" ) ).exists() )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : List[Any] = "sshleifer/tiny-gpt2"
def _check_summary_is_not_empty(A : int ):
self.assertTrue(hasattr(A , "sequential" ) )
self.assertTrue(hasattr(A , "cumulative" ) )
self.assertTrue(hasattr(A , "current" ) )
self.assertTrue(hasattr(A , "total" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=A , inference=A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(A , "log.txt" ) , log_print=A , trace_memory_line_by_line=A , multi_process=A , )
_UpperCAmelCase : int = PyTorchBenchmark(A )
_UpperCAmelCase : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(A , "log.txt" ) ).exists() )
| 289
| 1
|
import random
import unittest
import torch
from diffusers import IFImgaImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __lowerCamelCase ( lowercase , lowercase , unittest.TestCase ):
lowerCamelCase__: Optional[int] = IFImgaImgSuperResolutionPipeline
lowerCamelCase__: List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''}
lowerCamelCase__: int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} )
lowerCamelCase__: str = PipelineTesterMixin.required_optional_params - {'''latents'''}
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
return self._get_superresolution_dummy_components()
def A__ ( self , __snake_case , __snake_case=0 ) -> Optional[Any]:
"""simple docstring"""
if str(__snake_case ).startswith("mps" ):
UpperCAmelCase: Dict = torch.manual_seed(__snake_case )
else:
UpperCAmelCase: List[Any] = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
UpperCAmelCase: Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__snake_case ) ).to(__snake_case )
UpperCAmelCase: Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(__snake_case ) ).to(__snake_case )
UpperCAmelCase: Optional[int] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"original_image": original_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ) -> Tuple:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
def A__ ( self ) -> List[Any]:
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" )
def A__ ( self ) -> Optional[int]:
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1e-1 )
def A__ ( self ) -> int:
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def A__ ( self ) -> str:
"""simple docstring"""
self._test_save_load_local()
def A__ ( self ) -> List[Any]:
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
| 166
|
import copy
from typing import Dict, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
from ..detr import DetrConfig
from ..swin import SwinConfig
snake_case_ : Optional[int] = {
'facebook/maskformer-swin-base-ade': (
'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json'
)
# See all MaskFormer models at https://huggingface.co/models?filter=maskformer
}
snake_case_ : Optional[int] = logging.get_logger(__name__)
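# MaskFormerConfig composes two sub-configs: a vision backbone (Swin by default) and a
# DETR-style transformer decoder, each validated against the supported lists declared below.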
class __lowerCamelCase ( lowercase ):
lowerCamelCase__: Union[str, Any] = '''maskformer'''
lowerCamelCase__: Optional[int] = {'''hidden_size''': '''mask_feature_size'''}
lowerCamelCase__: Optional[int] = ['''resnet''', '''swin''']
lowerCamelCase__: Optional[int] = ['''detr''']
def __init__( self , __snake_case = 2_5_6 , __snake_case = 2_5_6 , __snake_case = 0.1 , __snake_case = False , __snake_case = None , __snake_case = None , __snake_case = 0.02 , __snake_case = 1.0 , __snake_case = 1.0 , __snake_case = 1.0 , __snake_case = 20.0 , __snake_case = None , **__snake_case , ) -> List[Any]:
"""simple docstring"""
if backbone_config is None:
# fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k
UpperCAmelCase: str = SwinConfig(
image_size=3_8_4 , in_channels=3 , patch_size=4 , embed_dim=1_2_8 , depths=[2, 2, 1_8, 2] , num_heads=[4, 8, 1_6, 3_2] , window_size=1_2 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase: Union[str, Any] = backbone_config.pop("model_type" )
UpperCAmelCase: List[str] = CONFIG_MAPPING[backbone_model_type]
UpperCAmelCase: Any = config_class.from_dict(__snake_case )
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
F'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. '
F'Supported model types: {",".join(self.backbones_supported )}' )
if decoder_config is None:
# fall back to https://huggingface.co/facebook/detr-resnet-50
UpperCAmelCase: Tuple = DetrConfig()
else:
# verify that the decoder is supported
UpperCAmelCase: Dict = (
decoder_config.pop("model_type" ) if isinstance(__snake_case , __snake_case ) else decoder_config.model_type
)
if decoder_type not in self.decoders_supported:
raise ValueError(
F'Transformer Decoder {decoder_type} not supported, please use one of'
F' {",".join(self.decoders_supported )}' )
if isinstance(__snake_case , __snake_case ):
UpperCAmelCase: Union[str, Any] = CONFIG_MAPPING[decoder_type]
UpperCAmelCase: Optional[Any] = config_class.from_dict(__snake_case )
UpperCAmelCase: Any = backbone_config
UpperCAmelCase: Dict = decoder_config
# main feature dimension for the model
UpperCAmelCase: Optional[int] = fpn_feature_size
UpperCAmelCase: Union[str, Any] = mask_feature_size
# initializer
UpperCAmelCase: Tuple = init_std
UpperCAmelCase: Union[str, Any] = init_xavier_std
# Hungarian matcher && loss
UpperCAmelCase: Optional[int] = cross_entropy_weight
UpperCAmelCase: List[str] = dice_weight
UpperCAmelCase: List[Any] = mask_weight
UpperCAmelCase: List[str] = use_auxiliary_loss
UpperCAmelCase: List[str] = no_object_weight
UpperCAmelCase: int = output_auxiliary_logits
UpperCAmelCase: int = self.decoder_config.encoder_attention_heads
UpperCAmelCase: Dict = self.decoder_config.num_hidden_layers
super().__init__(**__snake_case )
@classmethod
def A__ ( cls , __snake_case , __snake_case , **__snake_case ) -> Tuple:
"""simple docstring"""
return cls(
backbone_config=__snake_case , decoder_config=__snake_case , **__snake_case , )
def A__ ( self ) -> Dict[str, any]:
"""simple docstring"""
UpperCAmelCase: Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase: Union[str, Any] = self.backbone_config.to_dict()
UpperCAmelCase: List[Any] = self.decoder_config.to_dict()
UpperCAmelCase: Dict = self.__class__.model_type
return output
| 166
| 1
|
"""simple docstring"""
__author__ = '''Alexander Joslin'''
import operator as op
from .stack import Stack
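# Dijkstra's two-stack algorithm: digits go on an operand stack, operators on an operator
# stack, and every ")" pops one operator and two operands to evaluate a fully parenthesized
# infix expression.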
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = '''(5 + ((4 * 2) * (2 + 3)))'''
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 46
|
__lowercase = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm."""
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    operand_stack = Stack()
    operator_stack = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
__lowercase = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(f'{equation} = {dijkstras_two_stack_algorithm(equation)}')
| 203
| 0
|
"""simple docstring"""
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase = logging.get_logger(__name__)
def _a ( _snake_case , _snake_case ):
"""simple docstring"""
UpperCAmelCase = RobertaPreLayerNormConfig.from_pretrained(
lowerCamelCase_ , architectures=["""RobertaPreLayerNormForMaskedLM"""] )
# convert state_dict
UpperCAmelCase = torch.load(hf_hub_download(repo_id=lowerCamelCase_ , filename="""pytorch_model.bin""" ) )
UpperCAmelCase = {}
for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
if tensor_key.startswith("""roberta.""" ):
UpperCAmelCase = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :]
# The original implementation contains weights which are not used, remove them from the state_dict
if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ):
continue
UpperCAmelCase = tensor_value
UpperCAmelCase = RobertaPreLayerNormForMaskedLM.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase_ , config=lowerCamelCase_ , state_dict=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
# convert tokenizer
UpperCAmelCase = AutoTokenizer.from_pretrained(lowerCamelCase_ )
tokenizer.save_pretrained(lowerCamelCase_ )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_UpperCamelCase = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 712
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCamelCase__ :
def __init__( self ,A ,):
UpperCAmelCase = parent
UpperCAmelCase = 13
UpperCAmelCase = 7
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = 2
UpperCAmelCase = 99
UpperCAmelCase = 0
UpperCAmelCase = 32
UpperCAmelCase = 2
UpperCAmelCase = 4
UpperCAmelCase = 0.1
UpperCAmelCase = 0.1
UpperCAmelCase = 512
UpperCAmelCase = 16
UpperCAmelCase = 2
UpperCAmelCase = 0.02
UpperCAmelCase = 3
UpperCAmelCase = 4
UpperCAmelCase = """last"""
UpperCAmelCase = True
UpperCAmelCase = None
UpperCAmelCase = 0
def _UpperCamelCase ( self ):
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
UpperCAmelCase = None
if self.use_input_lengths:
UpperCAmelCase = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase = None
if self.use_token_type_ids:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
UpperCAmelCase = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertModel(config=A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase = model(A )
UpperCAmelCase = [input_ids, input_mask]
UpperCAmelCase = model(A )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertWithLMHeadModel(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths, """langs""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForQuestionAnsweringSimple(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = TFFlaubertForSequenceClassification(A )
UpperCAmelCase = {"""input_ids""": input_ids, """lengths""": input_lengths}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_labels
UpperCAmelCase = TFFlaubertForTokenClassification(config=A )
UpperCAmelCase = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ,A ,A ,A ,A ,):
UpperCAmelCase = self.num_choices
UpperCAmelCase = TFFlaubertForMultipleChoice(config=A )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = tf.tile(tf.expand_dims(A ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
UpperCAmelCase = model(A )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.prepare_config_and_inputs()
(
(
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) , (
UpperCAmelCase
) ,
) = config_and_inputs
UpperCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""langs""": token_type_ids,
"""lengths""": input_lengths,
}
return config, inputs_dict
@require_tf
class lowerCamelCase__ ( snake_case , snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
SCREAMING_SNAKE_CASE = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
SCREAMING_SNAKE_CASE = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE = False
SCREAMING_SNAKE_CASE = False
def _UpperCamelCase ( self ,A ,A ,A ,A ,A ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _UpperCamelCase ( self ):
UpperCAmelCase = TFFlaubertModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=A ,emb_dim=37 )
def _UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*A )
def _UpperCamelCase ( self ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*A )
@slow
def _UpperCamelCase ( self ):
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = TFFlaubertModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def _UpperCamelCase ( self ):
UpperCAmelCase = TFFlaubertModel.from_pretrained("""jplu/tf-flaubert-small-cased""" )
UpperCAmelCase = tf.convert_to_tensor(
[[0, 158, 735, 2_592, 1_424, 6_727, 82, 1]] ,dtype=tf.intaa ,) # "J'aime flaubert !"
UpperCAmelCase = model(A )[0]
UpperCAmelCase = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape ,A )
# compare the actual values for a slice.
UpperCAmelCase = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] ,dtype=tf.floataa ,)
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 74
| 0
|
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
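# Compares two ways of finding a triplet with a given sum: brute force over all permutations
# versus sorting plus a two-pointer scan, timed below with timeit.repeat.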
def make_dataset() -> tuple[list[int], int]:
    """Return a random test array of ten ints and a random target value."""
    arr = [randint(-1_000 , 1_000 ) for i in range(10 )]
    r = randint(-5_000 , 5_000 )
    return (arr, r)
dataset = make_dataset()
def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every permutation of three elements."""
    for triplet in permutations(arr , 3 ):
        if sum(triplet ) == target:
            return tuple(sorted(triplet ) )
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized: sort once, then scan with two pointers."""
    arr.sort()
    n = len(arr )
    for i in range(n - 1 ):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
def solution_times() -> tuple[float, float]:
    """Time both implementations with timeit.repeat and return the best run of each."""
    setup_code = '''
from __main__ import dataset, triplet_sum1, triplet_sum2
'''
    test_code1 = '''
triplet_sum1(*dataset)
'''
    test_code2 = '''
triplet_sum2(*dataset)
'''
    times1 = repeat(setup=setup_code , stmt=test_code1 , repeat=5 , number=10_000 )
    times2 = repeat(setup=setup_code , stmt=test_code2 , repeat=5 , number=10_000 )
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'The time for naive implementation is {times[0]}.')
print(F'The time for optimized implementation is {times[1]}.')
| 167
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2D shape."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
class lowerCamelCase ( unittest.TestCase ):
def __init__( self , lowercase__ , lowercase__=7 , lowercase__=4_0_0 , lowercase__=2_0_0_0 , lowercase__=2_0_4_8 , lowercase__=1_2_8 , lowercase__=1 , lowercase__=5_1_2 , lowercase__=3_0 , lowercase__=4_4_1_0_0 , ):
__UpperCAmelCase : Optional[Any] = parent
__UpperCAmelCase : Any = batch_size
__UpperCAmelCase : int = min_seq_length
__UpperCAmelCase : List[str] = max_seq_length
__UpperCAmelCase : Dict = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__UpperCAmelCase : Any = spectrogram_length
__UpperCAmelCase : List[Any] = feature_size
__UpperCAmelCase : Union[str, Any] = num_audio_channels
__UpperCAmelCase : Optional[int] = hop_length
__UpperCAmelCase : Tuple = chunk_length
__UpperCAmelCase : Any = sampling_rate
def A( self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def A( self , lowercase__=False , lowercase__=False):
def _flatten(lowercase__):
return list(itertools.chain(*lowercase__))
if equal_length:
__UpperCAmelCase : str = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
__UpperCAmelCase : List[str] = [
floats_list((x, self.feature_size))
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff)
]
if numpify:
__UpperCAmelCase : List[str] = [np.asarray(lowercase__) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class lowerCamelCase ( _UpperCamelCase , unittest.TestCase ):
_lowerCAmelCase : Optional[int] = TvltFeatureExtractor
def A( self):
__UpperCAmelCase : Dict = TvltFeatureExtractionTester(self)
def A( self):
__UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
self.assertTrue(hasattr(lowercase__ , '''spectrogram_length'''))
self.assertTrue(hasattr(lowercase__ , '''feature_size'''))
self.assertTrue(hasattr(lowercase__ , '''num_audio_channels'''))
self.assertTrue(hasattr(lowercase__ , '''hop_length'''))
self.assertTrue(hasattr(lowercase__ , '''chunk_length'''))
self.assertTrue(hasattr(lowercase__ , '''sampling_rate'''))
def A( self):
__UpperCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : str = feat_extract_first.save_pretrained(lowercase__)[0]
check_json_file_has_correct_format(lowercase__)
__UpperCAmelCase : Optional[int] = self.feature_extraction_class.from_pretrained(lowercase__)
__UpperCAmelCase : List[Any] = feat_extract_first.to_dict()
__UpperCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__UpperCAmelCase : Union[str, Any] = dict_first.pop('''mel_filters''')
__UpperCAmelCase : Union[str, Any] = dict_second.pop('''mel_filters''')
self.assertTrue(np.allclose(lowercase__ , lowercase__))
self.assertEqual(lowercase__ , lowercase__)
def A( self):
__UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Union[str, Any] = os.path.join(lowercase__ , '''feat_extract.json''')
feat_extract_first.to_json_file(lowercase__)
__UpperCAmelCase : str = self.feature_extraction_class.from_json_file(lowercase__)
__UpperCAmelCase : Any = feat_extract_first.to_dict()
__UpperCAmelCase : Union[str, Any] = feat_extract_second.to_dict()
__UpperCAmelCase : Tuple = dict_first.pop('''mel_filters''')
__UpperCAmelCase : List[str] = dict_second.pop('''mel_filters''')
self.assertTrue(np.allclose(lowercase__ , lowercase__))
self.assertEqual(lowercase__ , lowercase__)
def A( self):
# Initialize feature_extractor
__UpperCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict)
# create three inputs of length 800, 1000, and 1200
__UpperCAmelCase : Optional[int] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)]
__UpperCAmelCase : int = [np.asarray(lowercase__) for speech_input in speech_inputs]
# Test not batched input
__UpperCAmelCase : Dict = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
# Test batched
__UpperCAmelCase : List[str] = feature_extractor(lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
# Test audio masking
__UpperCAmelCase : Tuple = feature_extractor(
lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=lowercase__).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
# Test 2-D numpy arrays are batched.
__UpperCAmelCase : Any = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)]
__UpperCAmelCase : Optional[Any] = np.asarray(lowercase__)
__UpperCAmelCase : Tuple = feature_extractor(lowercase__ , return_tensors='''np''' , sampling_rate=4_4_1_0_0).audio_values
self.assertTrue(encoded_audios.ndim == 4)
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
def A( self , lowercase__):
__UpperCAmelCase : Optional[int] = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''')
# automatic decoding with librispeech
__UpperCAmelCase : int = ds.sort('''id''').select(range(lowercase__))[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def A( self):
__UpperCAmelCase : Optional[Any] = self._load_datasamples(1)
__UpperCAmelCase : Tuple = TvltFeatureExtractor()
__UpperCAmelCase : Tuple = feature_extractor(lowercase__ , return_tensors='''pt''').audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8))
__UpperCAmelCase : int = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]])
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , lowercase__ , atol=1e-4))
| 462
| 0
|
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , a , a=13 , a=7 , a=True , a=True , a=True , a=True , a=99 , a=32 , a=5 , a=4 , a=37 , a="gelu" , a=0.1 , a=0.1 , a=5_12 , a=16 , a=2 , a=0.02 , a=4 , ) -> str:
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_attention_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_choices
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = None
if self.use_attention_mask:
snake_case_ = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ = config_and_inputs
snake_case_ = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def _UpperCamelCase ( self ) -> str:
snake_case_ = self.prepare_config_and_inputs()
snake_case_ = config_and_inputs
snake_case_ = True
snake_case_ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class UpperCamelCase_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = True
lowerCAmelCase = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = FlaxBertModelTester(self )
@slow
def _UpperCamelCase ( self ) -> List[str]:
snake_case_ = FlaxBertModel.from_pretrained('bert-base-cased' )
snake_case_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
| 704
|
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
def __init__( self , *a , **a ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , a , )
super().__init__(*a , **a )
| 607
| 0
|
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
lowercase_ = logging.get_logger(__name__)
@dataclass
class __A :
'''simple docstring'''
__lowerCamelCase : str = field(metadata={'help': 'The name of the task to train on: ' + ', '.join(glue_processors.keys() )} )
__lowerCamelCase : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
__lowerCamelCase : int = field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase : bool = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
_a = self.task_name.lower()
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : List[Any] = 'train'
__lowerCamelCase : Tuple = 'dev'
__lowerCamelCase : Tuple = 'test'
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : GlueDataTrainingArguments
__lowerCamelCase : str
__lowerCamelCase : List[InputFeatures]
def __init__(self , A , A , A = None , A = Split.train , A = None , ) -> Optional[Any]:
"""simple docstring"""
warnings.warn(
'''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '''
'''library. You can have a look at this example script for pointers: '''
'''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , A , )
_a = args
_a = glue_processors[args.task_name]()
_a = glue_output_modes[args.task_name]
if isinstance(A , A ):
try:
_a = Split[mode]
except KeyError:
raise KeyError('''mode is not a valid split name''' )
# Load data features from cache or dataset file
_a = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
_a = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
_a , _a = label_list[2], label_list[1]
_a = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
_a = cached_features_file + '''.lock'''
with FileLock(A ):
if os.path.exists(A ) and not args.overwrite_cache:
_a = time.time()
_a = torch.load(A )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
_a = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
_a = self.processor.get_test_examples(args.data_dir )
else:
_a = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
_a = examples[:limit_length]
_a = glue_convert_examples_to_features(
A , A , max_length=args.max_seq_length , label_list=A , output_mode=self.output_mode , )
_a = time.time()
torch.save(self.features , A )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__(self ) -> Any:
"""simple docstring"""
return len(self.features )
def __getitem__(self , A ) -> InputFeatures:
"""simple docstring"""
return self.features[i]
def a__ (self ) -> int:
"""simple docstring"""
return self.label_list
| 11
|
'''simple docstring'''
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
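# Converts a PIL image (or a list/tensor of images) into a normalized torch tensor in [-1, 1]
# with shape (batch, channels, height, width).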
def lowerCAmelCase (__A , __A , __A):
"""simple docstring"""
if isinstance(__A , torch.Tensor):
return image
elif isinstance(__A , PIL.Image.Image):
_a = [image]
if isinstance(image[0] , PIL.Image.Image):
_a = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['''lanczos''']))[None, :] for i in image]
_a = np.concatenate(__A , axis=0)
_a = np.array(__A).astype(np.floataa) / 2_55.0
_a = image.transpose(0 , 3 , 1 , 2)
_a = 2.0 * image - 1.0
_a = torch.from_numpy(__A)
elif isinstance(image[0] , torch.Tensor):
_a = torch.cat(__A , dim=0)
return image
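# Spherical linear interpolation (slerp) between two vectors; falls back to plain linear
# interpolation when the vectors are nearly colinear (dot product above DOT_THRESHOLD).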
def lowerCAmelCase (__A , __A , __A , __A=0.99_95):
"""simple docstring"""
if not isinstance(__A , np.ndarray):
_a = True
_a = va.device
_a = va.cpu().numpy()
_a = va.cpu().numpy()
_a = np.sum(va * va / (np.linalg.norm(__A) * np.linalg.norm(__A)))
if np.abs(__A) > DOT_THRESHOLD:
_a = (1 - t) * va + t * va
else:
_a = np.arccos(__A)
_a = np.sin(__A)
_a = theta_a * t
_a = np.sin(__A)
_a = np.sin(theta_a - theta_t) / sin_theta_a
_a = sin_theta_t / sin_theta_a
_a = sa * va + sa * va
if inputs_are_torch:
_a = torch.from_numpy(__A).to(__A)
return va
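# Spherical distance loss between L2-normalized embeddings, as commonly used for CLIP guidance.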
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = F.normalize(__A , dim=-1)
_a = F.normalize(__A , dim=-1)
return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
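# Helper used as set_requires_grad(module, value): freezes or unfreezes every parameter of a module.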
def lowerCAmelCase (__A , __A):
"""simple docstring"""
for param in model.parameters():
_a = value
class __A(DiffusionPipeline):
'''simple docstring'''
def __init__(self , A , A , A , A , A , A , A , A=None , A=None , A=None , ) -> str:
"""simple docstring"""
super().__init__()
self.register_modules(
vae=A , text_encoder=A , clip_model=A , tokenizer=A , unet=A , scheduler=A , feature_extractor=A , coca_model=A , coca_tokenizer=A , coca_transform=A , )
_a = (
feature_extractor.size
if isinstance(feature_extractor.size , A )
else feature_extractor.size['''shortest_edge''']
)
_a = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , A )
set_requires_grad(self.clip_model , A )
def a__ (self , A = "auto" ) -> Union[str, Any]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_a = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.enable_attention_slicing(A )
def a__ (self ) -> int:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Union[str, Any]:
"""simple docstring"""
set_requires_grad(self.vae , A )
def a__ (self ) -> Dict:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self ) -> str:
"""simple docstring"""
set_requires_grad(self.unet , A )
def a__ (self , A , A , A ) -> Optional[Any]:
"""simple docstring"""
_a = min(int(num_inference_steps * strength ) , A )
_a = max(num_inference_steps - init_timestep , 0 )
_a = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def a__ (self , A , A , A , A , A , A=None ) -> List[str]:
"""simple docstring"""
if not isinstance(A , torch.Tensor ):
raise ValueError(f'''`image` has to be of type `torch.Tensor` but is {type(A )}''' )
_a = image.to(device=A , dtype=A )
if isinstance(A , A ):
_a = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(A )
]
_a = torch.cat(A , dim=0 )
else:
_a = self.vae.encode(A ).latent_dist.sample(A )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 0.18215 * init_latents
_a = init_latents.repeat_interleave(A , dim=0 )
_a = randn_tensor(init_latents.shape , generator=A , device=A , dtype=A )
# get latents
_a = self.scheduler.add_noise(A , A , A )
_a = init_latents
return latents
def a__ (self , A ) -> Tuple:
"""simple docstring"""
_a = self.coca_transform(A ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
_a = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
_a = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('''<end_of_text>''' )[0].replace('''<start_of_text>''' , '''''' ).rstrip(''' .,''' )
def a__ (self , A , A ) -> List[Any]:
"""simple docstring"""
_a = self.feature_extractor.preprocess(A )
_a = torch.from_numpy(clip_image_input['''pixel_values'''][0] ).unsqueeze(0 ).to(self.device ).half()
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = image_embeddings_clip.repeat_interleave(A , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def a__ (self , A , A , A , A , A , A , A , ) -> Union[str, Any]:
"""simple docstring"""
_a = latents.detach().requires_grad_()
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
_a = self.scheduler.alphas_cumprod[timestep]
_a = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
_a = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
_a = torch.sqrt(A )
_a = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , A ):
_a = self.scheduler.sigmas[index]
_a = latents - sigma * noise_pred
else:
raise ValueError(f'''scheduler type {type(self.scheduler )} not supported''' )
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * sample
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = transforms.Resize(self.feature_extractor_size )(A )
_a = self.normalize(A ).to(latents.dtype )
_a = self.clip_model.get_image_features(A )
_a = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=A )
_a = spherical_dist_loss(A , A ).mean() * clip_guidance_scale
_a = -torch.autograd.grad(A , A )[0]
if isinstance(self.scheduler , A ):
_a = latents.detach() + grads * (sigma**2)
_a = noise_pred_original
else:
_a = noise_pred_original - torch.sqrt(A ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self , A , A , A = None , A = None , A = 512 , A = 512 , A = 0.6 , A = 50 , A = 7.5 , A = 1 , A = 0.0 , A = 100 , A = None , A = "pil" , A = True , A = 0.8 , A = 0.1 , A = 0.1 , ) -> str:
"""simple docstring"""
if isinstance(A , A ) and len(A ) != batch_size:
raise ValueError(f'''You have passed {batch_size} batch_size, but only {len(A )} generators.''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if isinstance(A , torch.Generator ) and batch_size > 1:
_a = [generator] + [None] * (batch_size - 1)
_a = [
('''model''', self.coca_model is None),
('''tokenizer''', self.coca_tokenizer is None),
('''transform''', self.coca_transform is None),
]
_a = [x[0] for x in coca_is_none if x[1]]
_a = ''', '''.join(A )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(A ):
raise ValueError(
f'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
f'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
if style_prompt is None:
if len(A ):
raise ValueError(
f'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
f''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
_a = self.get_image_description(A )
# get prompt text embeddings for content and style
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
_a = self.tokenizer(
A , padding='''max_length''' , max_length=self.tokenizer.model_max_length , truncation=A , return_tensors='''pt''' , )
_a = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
_a = slerp(A , A , A )
# duplicate text embeddings for each generation per prompt
_a = text_embeddings.repeat_interleave(A , dim=0 )
# set timesteps
_a = '''offset''' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
_a = {}
if accepts_offset:
_a = 1
self.scheduler.set_timesteps(A , **A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
self.scheduler.timesteps.to(self.device )
_a , _a = self.get_timesteps(A , A , self.device )
_a = timesteps[:1].repeat(A )
# Preprocess image
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = preprocess(A , A , A )
_a = self.prepare_latents(
A , A , A , text_embeddings.dtype , self.device , A )
_a = slerp(A , A , A )
if clip_guidance_scale > 0:
_a = self.get_clip_image_embeddings(A , A )
_a = self.get_clip_image_embeddings(A , A )
_a = slerp(
A , A , A )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_a = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_a = content_text_input.input_ids.shape[-1]
_a = self.tokenizer([''''''] , padding='''max_length''' , max_length=A , return_tensors='''pt''' )
_a = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
_a = uncond_embeddings.repeat_interleave(A , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_a = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_a = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
_a = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
_a = torch.randn(A , generator=A , device='''cpu''' , dtype=A ).to(
self.device )
else:
_a = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_a = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_a = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_a = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_a = {}
if accepts_eta:
_a = eta
# check if the scheduler accepts generator
_a = '''generator''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
_a = generator
with self.progress_bar(total=A ):
for i, t in enumerate(A ):
# expand the latents if we are doing classifier free guidance
_a = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_a = self.scheduler.scale_model_input(A , A )
# predict the noise residual
_a = self.unet(A , A , encoder_hidden_states=A ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
_a , _a = noise_pred.chunk(2 )
_a = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
_a = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
_a , _a = self.cond_fn(
A , A , A , A , A , A , A , )
# compute the previous noisy sample x_t -> x_t-1
_a = self.scheduler.step(A , A , A , **A ).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
_a = 1 / 0.18215 * latents
_a = self.vae.decode(A ).sample
_a = (image / 2 + 0.5).clamp(0 , 1 )
_a = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_a = self.numpy_to_pil(A )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 11
| 1
|
"""simple docstring"""
def method_1(boundary, steps):
    """Composite trapezoidal rule on [boundary[0], boundary[1]] with the given number of steps."""
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(f"y = {y}")
if __name__ == "__main__":
main()
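
# Added sanity check (an assumption, not part of the original script): for f(x) = x**2 on [0, 1]
# the exact integral is 1/3, so raising `steps` should drive method_1 towards 0.333...
if __name__ == "__main__":
    for demo_steps in (10.0, 100.0, 1000.0):
        print(f"steps={demo_steps}: approximation = {method_1([0.0, 1.0], demo_steps)}")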
| 720
|
"""simple docstring"""
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial (coefficients in increasing order of degree) at x directly."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial at x with Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
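
# Added sanity check (an assumption, not part of the original file): both evaluation strategies
# should agree, since Horner's rule only re-associates the same sum.
if __name__ == "__main__":
    assert abs(evaluate_poly(poly, x) - horner(poly, x)) < 1e-6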
| 645
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_blip_2": [
        "BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "Blip2Config",
        "Blip2QFormerConfig",
        "Blip2VisionConfig",
    ],
    "processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
        "BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Blip2Model",
        "Blip2QFormerModel",
        "Blip2PreTrainedModel",
        "Blip2ForConditionalGeneration",
        "Blip2VisionModel",
    ]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 622
|
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class SCREAMING_SNAKE_CASE__(ProcessorMixin):
    """Speech2Text processor wrapping a feature extractor and a tokenizer into a single object."""

    feature_extractor_class = "Speech2TextFeatureExtractor"
    tokenizer_class = "Speech2TextTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # Inside the (deprecated) target context manager, forward everything to the tokenizer.
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.")
            audio = kwargs.pop("raw_speech")
        else:
            audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
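
# Hedged usage sketch (added commentary, not part of the original module). Assuming this class
# corresponds to `Speech2TextProcessor` from `transformers`, a typical call hands audio and text
# over in one go; the checkpoint name and the `waveform` array below are illustrative assumptions.
#
#     from transformers import Speech2TextProcessor
#     processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")
#     inputs = processor(audio=waveform, sampling_rate=16_000, text="a transcription", return_tensors="pt")
#     # `inputs` then contains the feature-extractor outputs plus a `labels` key from the tokenizer.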
| 645
| 0
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 474
|
'''simple docstring'''
from string import ascii_uppercase
dict1 = {char: i for i, char in enumerate(ascii_uppercase)}
dict2 = dict(enumerate(ascii_uppercase))


def generate_key(message: str, key: str) -> str:
    """Repeat the key until it is as long as the message."""
    x = len(message)
    i = 0
    while True:
        if x == i:
            i = 0
        if len(key) == len(message):
            break
        key += key[i]
        i += 1
    return key


def cipher_text(message: str, key_new: str) -> str:
    """Encrypt the message with the expanded key (spaces are passed through)."""
    cipher_text = ""
    i = 0
    for letter in message:
        if letter == " ":
            cipher_text += " "
        else:
            x = (dict1[letter] - dict1[key_new[i]]) % 26
            i += 1
            cipher_text += dict2[x]
    return cipher_text


def original_text(cipher_text: str, key_new: str) -> str:
    """Decrypt the cipher text with the expanded key."""
    or_txt = ""
    i = 0
    for letter in cipher_text:
        if letter == " ":
            or_txt += " "
        else:
            x = (dict1[letter] + dict1[key_new[i]] + 26) % 26
            i += 1
            or_txt += dict2[x]
    return or_txt


def main() -> None:
    message = "THE GERMAN ATTACK"
    key = "SECRET"
    key_new = generate_key(message, key)
    s = cipher_text(message, key_new)
    print(f"Encrypted Text = {s}")
    print(f"Original Text = {original_text(s, key_new)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
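
# Added round-trip demonstration (an assumption, not part of the original script): the key is
# repeated until it matches the message length, and decryption inverts encryption exactly.
if __name__ == "__main__":
    demo_key = generate_key("HELLO WORLD", "KEY")
    encrypted = cipher_text("HELLO WORLD", demo_key)
    assert original_text(encrypted, demo_key) == "HELLO WORLD"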
| 474
| 1
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to any base between 2 and 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
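
# Added spot checks (an assumption, not part of the original file): a few conversions in familiar bases.
if __name__ == "__main__":
    print(decimal_to_any(255, 16))  # FF
    print(decimal_to_any(255, 2))  # 11111111
    print(decimal_to_any(7, 8))  # 7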
| 221
|
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Pick a parquet row-group size based on the (possibly nested) feature types."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size
class UpperCAmelCase__(AbstractDatasetReader):
"""simple docstring"""
def __init__( self: Optional[Any] , __lowerCAmelCase: NestedDataStructureLike[PathLike] , __lowerCAmelCase: Optional[NamedSplit] = None , __lowerCAmelCase: Optional[Features] = None , __lowerCAmelCase: str = None , __lowerCAmelCase: bool = False , __lowerCAmelCase: bool = False , __lowerCAmelCase: Optional[int] = None , **__lowerCAmelCase: List[Any] , ) -> Any:
'''simple docstring'''
super().__init__(
__lowerCAmelCase , split=__lowerCAmelCase , features=__lowerCAmelCase , cache_dir=__lowerCAmelCase , keep_in_memory=__lowerCAmelCase , streaming=__lowerCAmelCase , num_proc=__lowerCAmelCase , **__lowerCAmelCase , )
__UpperCAmelCase = path_or_paths if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else {self.split: path_or_paths}
__UpperCAmelCase = _PACKAGED_DATASETS_MODULES["parquet"][1]
__UpperCAmelCase = Parquet(
cache_dir=__lowerCAmelCase , data_files=__lowerCAmelCase , features=__lowerCAmelCase , hash=__lowerCAmelCase , **__lowerCAmelCase , )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
if self.streaming:
__UpperCAmelCase = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
self.builder.download_and_prepare(
download_config=__lowerCAmelCase , download_mode=__lowerCAmelCase , verification_mode=__lowerCAmelCase , base_path=__lowerCAmelCase , num_proc=self.num_proc , )
__UpperCAmelCase = self.builder.as_dataset(
split=self.split , verification_mode=__lowerCAmelCase , in_memory=self.keep_in_memory )
return dataset
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self: Optional[int] , __lowerCAmelCase: Dataset , __lowerCAmelCase: Union[PathLike, BinaryIO] , __lowerCAmelCase: Optional[int] = None , **__lowerCAmelCase: Optional[int] , ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = dataset
__UpperCAmelCase = path_or_buf
__UpperCAmelCase = batch_size or get_writer_batch_size(dataset.features )
__UpperCAmelCase = parquet_writer_kwargs
def _UpperCAmelCase ( self: Optional[Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , "wb+" ) as buffer:
__UpperCAmelCase = self._write(file_obj=__lowerCAmelCase , batch_size=__lowerCAmelCase , **self.parquet_writer_kwargs )
else:
__UpperCAmelCase = self._write(file_obj=self.path_or_buf , batch_size=__lowerCAmelCase , **self.parquet_writer_kwargs )
return written
def _UpperCAmelCase ( self: Tuple , __lowerCAmelCase: BinaryIO , __lowerCAmelCase: int , **__lowerCAmelCase: List[Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase = 0
__UpperCAmelCase = parquet_writer_kwargs.pop("path_or_buf" , __lowerCAmelCase )
__UpperCAmelCase = self.dataset.features.arrow_schema
__UpperCAmelCase = pq.ParquetWriter(__lowerCAmelCase , schema=__lowerCAmelCase , **__lowerCAmelCase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , __lowerCAmelCase ) , unit="ba" , disable=not logging.is_progress_bar_enabled() , desc="Creating parquet from Arrow format" , ):
__UpperCAmelCase = query_table(
table=self.dataset._data , key=slice(__lowerCAmelCase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(__lowerCAmelCase )
written += batch.nbytes
writer.close()
return written
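
# Hedged usage sketch (added commentary, not part of the original module). In the `datasets`
# library these reader/writer classes back the parquet round trip on `Dataset`; the file name
# below is an illustrative assumption.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#     ds.to_parquet("demo.parquet")                    # uses the parquet writer internally
#     reloaded = Dataset.from_parquet("demo.parquet")  # uses the parquet reader internally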
| 221
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : Optional[int] = logging.get_logger(__name__)
__A : List[str] = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class _UpperCAmelCase(PretrainedConfig):
SCREAMING_SNAKE_CASE_ : Tuple = 'swin2sr'
SCREAMING_SNAKE_CASE_ : Optional[int] = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
    def __init__(self, image_size=64, patch_size=1, num_channels=3, embed_dim=180, depths=[6, 6, 6, 6, 6, 6], num_heads=[6, 6, 6, 6, 6, 6], window_size=8, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, upscale=2, img_range=1.0, resi_connection="1conv", upsampler="pixelshuffle", **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
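
# Hedged usage sketch (added commentary, not part of the original file). Assuming this mirrors
# `Swin2SRConfig` from `transformers`, a model can be built from a customised config like so:
#
#     from transformers import Swin2SRConfig, Swin2SRModel
#     config = Swin2SRConfig(upscale=4, embed_dim=96)
#     model = Swin2SRModel(config)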
| 714
|
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
def __init__( self : Optional[int] , A : Optional[Any]=0.01 , A : int=10_00 ) -> Optional[int]:
lowercase_ : Dict = p_stop
lowercase_ : Optional[Any] = max_length
def __iter__( self : Dict ) -> Dict:
lowercase_ : str = 0
lowercase_ : Optional[int] = False
while not stop and count < self.max_length:
yield count
count += 1
lowercase_ : List[str] = random.random() < self.p_stop
class _UpperCAmelCase ( unittest.TestCase ):
def A ( self : List[Any] , A : Any , A : Union[str, Any] , A : Optional[Any]=False , A : Dict=True ) -> str:
lowercase_ : Tuple = [
BatchSamplerShard(A , 2 , A , split_batches=A , even_batches=A )
for i in range(2 )
]
lowercase_ : Optional[Any] = [list(A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(A ) for shard in batch_sampler_shards] , [len(A ) for e in expected] )
self.assertListEqual(A , A )
def A ( self : Dict ) -> Tuple:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : Union[str, Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Tuple = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : str = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : Tuple = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[int] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is very small.
lowercase_ : str = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A , A )
lowercase_ : Any = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : List[str] = [[], []]
self.check_batch_sampler_shards(A , A )
def A ( self : str ) -> str:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : List[str] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Any = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
lowercase_ : Optional[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
def A ( self : str ) -> int:
# Check the shards when the dataset is a round multiple of total batch size.
lowercase_ : str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : Union[str, Any] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
lowercase_ : str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
lowercase_ : List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : str = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
lowercase_ : Any = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Tuple = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
lowercase_ : List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
lowercase_ : Optional[Any] = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
def A ( self : Optional[Any] ) -> Union[str, Any]:
# Check the shards when the dataset is a round multiple of batch size.
lowercase_ : Any = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
lowercase_ : int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : int = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
lowercase_ : List[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
lowercase_ : str = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
lowercase_ : Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
lowercase_ : Optional[int] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Union[str, Any] = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
lowercase_ : List[str] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
lowercase_ : Dict = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
def A ( self : str ) -> str:
lowercase_ : str = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
lowercase_ : Tuple = [BatchSamplerShard(A , 2 , A , even_batches=A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def A ( self : Union[str, Any] , A : Union[str, Any] , A : Tuple , A : Dict , A : str=False , A : Any=2 , A : Optional[int]=False ) -> Optional[Any]:
random.seed(A )
lowercase_ : Any = list(A )
lowercase_ : Optional[int] = [
IterableDatasetShard(
A , batch_size=A , drop_last=A , num_processes=A , process_index=A , split_batches=A , )
for i in range(A )
]
lowercase_ : Any = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(A )
iterable_dataset_lists.append(list(A ) )
lowercase_ : List[Any] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
lowercase_ : Dict = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(A ) , len(A ) )
self.assertTrue(len(A ) % shard_batch_size == 0 )
lowercase_ : Optional[int] = []
for idx in range(0 , len(A ) , A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(A ) < len(A ):
reference += reference
self.assertListEqual(A , reference[: len(A )] )
def A ( self : Optional[Any] ) -> List[str]:
lowercase_ : int = 42
lowercase_ : Tuple = RandomIterableDataset()
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
# Edge case with a very small dataset
lowercase_ : List[str] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
def A ( self : Optional[Any] ) -> Tuple:
lowercase_ : List[str] = BatchSampler(range(16 ) , batch_size=4 , drop_last=A )
lowercase_ : int = SkipBatchSampler(A , 2 )
self.assertListEqual(list(A ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A ( self : List[str] ) -> Union[str, Any]:
lowercase_ : int = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A ( self : Dict ) -> int:
lowercase_ : Optional[Any] = DataLoader(list(range(16 ) ) , batch_size=4 )
lowercase_ : Union[str, Any] = skip_first_batches(A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A ( self : List[str] ) -> str:
lowercase_ : Any = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def A ( self : Optional[Any] ) -> Optional[int]:
Accelerator()
lowercase_ : Tuple = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 141
| 0
|
import unittest
import numpy as np
import torch
from diffusers import DDIMPipeline, DDIMScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class __lowerCAmelCase(PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
snake_case_ = DDIMPipeline
snake_case_ = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
snake_case_ = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''latents''',
'''callback''',
'''callback_steps''',
}
snake_case_ = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
snake_case_ = False
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.manual_seed(0 )
__lowerCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
__lowerCamelCase = DDIMScheduler()
__lowerCamelCase = {'unet': unet, 'scheduler': scheduler}
return components
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=0 ) -> List[Any]:
'''simple docstring'''
if str(lowerCamelCase__ ).startswith('mps' ):
__lowerCamelCase = torch.manual_seed(lowerCamelCase__ )
else:
__lowerCamelCase = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__lowerCamelCase = {
'batch_size': 1,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
__lowerCamelCase = 'cpu'
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**lowerCamelCase__ )
pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = self.get_dummy_inputs(lowerCamelCase__ )
__lowerCamelCase = pipe(**lowerCamelCase__ ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 32, 32, 3) )
__lowerCamelCase = np.array(
[1.0_00e00, 5.7_17e-01, 4.7_17e-01, 1.0_00e00, 0.0_00e00, 1.0_00e00, 3.0_00e-04, 0.0_00e00, 9.0_00e-04] )
__lowerCamelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(lowerCamelCase__ , 1e-3 )
def lowercase_ ( self ) -> Optional[Any]:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def lowercase_ ( self ) -> Union[str, Any]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def lowercase_ ( self ) -> Dict:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = 'google/ddpm-cifar10-32'
__lowerCamelCase = UNetaDModel.from_pretrained(lowerCamelCase__ )
__lowerCamelCase = DDIMScheduler()
__lowerCamelCase = DDIMPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
ddim.to(lowerCamelCase__ )
ddim.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = ddim(generator=lowerCamelCase__ , eta=0.0 , output_type='numpy' ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__lowerCamelCase = np.array([0.17_23, 0.16_17, 0.16_00, 0.16_26, 0.14_97, 0.15_13, 0.15_05, 0.14_42, 0.14_53] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def lowercase_ ( self ) -> str:
'''simple docstring'''
__lowerCamelCase = 'google/ddpm-ema-bedroom-256'
__lowerCamelCase = UNetaDModel.from_pretrained(lowerCamelCase__ )
__lowerCamelCase = DDIMScheduler.from_pretrained(lowerCamelCase__ )
__lowerCamelCase = DDIMPipeline(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
ddpm.to(lowerCamelCase__ )
ddpm.set_progress_bar_config(disable=lowerCamelCase__ )
__lowerCamelCase = torch.manual_seed(0 )
__lowerCamelCase = ddpm(generator=lowerCamelCase__ , output_type='numpy' ).images
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
__lowerCamelCase = np.array([0.00_60, 0.02_01, 0.03_44, 0.00_24, 0.00_18, 0.00_02, 0.00_22, 0.00_00, 0.00_69] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 469
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
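
# Added note (not part of the original file): this entry point backs the `accelerate` console
# script, so the sub-commands registered above correspond to shell invocations such as
#
#   accelerate config                      # interactive configuration
#   accelerate env                         # print environment information
#   accelerate launch train.py --your-args # launch a (possibly distributed) script
#   accelerate test                        # sanity-check the current configuration
#
# where `train.py --your-args` is only a placeholder.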
| 469
| 1
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case :Optional[Any] =logging.get_logger(__name__)
__snake_case :str ={
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
A_ : Optional[int] = 'align_text_model'
def __init__( self : List[Any] , __UpperCamelCase : Any=30_522 , __UpperCamelCase : Dict=768 , __UpperCamelCase : int=12 , __UpperCamelCase : int=12 , __UpperCamelCase : Optional[int]=3_072 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : List[str]=512 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : Any=1e-12 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Optional[int]="absolute" , __UpperCamelCase : List[Any]=True , **__UpperCamelCase : Any , ) -> str:
super().__init__(**__UpperCamelCase )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = position_embedding_type
A = use_cache
A = pad_token_id
@classmethod
def __UpperCamelCase ( cls : Any , __UpperCamelCase : Union[str, os.PathLike] , **__UpperCamelCase : Dict ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
A , A = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
A = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class AlignVisionConfig(PretrainedConfig):
A_ : Tuple = 'align_vision_model'
def __init__( self : Any , __UpperCamelCase : int = 3 , __UpperCamelCase : int = 600 , __UpperCamelCase : float = 2.0 , __UpperCamelCase : float = 3.1 , __UpperCamelCase : int = 8 , __UpperCamelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , __UpperCamelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , __UpperCamelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , __UpperCamelCase : List[int] = [] , __UpperCamelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , __UpperCamelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , __UpperCamelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , __UpperCamelCase : float = 0.2_5 , __UpperCamelCase : str = "swish" , __UpperCamelCase : int = 2_560 , __UpperCamelCase : str = "mean" , __UpperCamelCase : float = 0.0_2 , __UpperCamelCase : float = 0.0_0_1 , __UpperCamelCase : float = 0.9_9 , __UpperCamelCase : float = 0.2 , **__UpperCamelCase : Optional[Any] , ) -> int:
super().__init__(**__UpperCamelCase )
A = num_channels
A = image_size
A = width_coefficient
A = depth_coefficient
A = depth_divisor
A = kernel_sizes
A = in_channels
A = out_channels
A = depthwise_padding
A = strides
A = num_block_repeats
A = expand_ratios
A = squeeze_expansion_ratio
A = hidden_act
A = hidden_dim
A = pooling_type
A = initializer_range
A = batch_norm_eps
A = batch_norm_momentum
A = drop_connect_rate
A = sum(__UpperCamelCase ) * 4
@classmethod
def __UpperCamelCase ( cls : Dict , __UpperCamelCase : Union[str, os.PathLike] , **__UpperCamelCase : int ) -> "PretrainedConfig":
cls._set_token_in_kwargs(__UpperCamelCase )
A , A = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('model_type' ) == "align":
A = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class AlignConfig(PretrainedConfig):
A_ : str = 'align'
A_ : int = True
def __init__( self : Dict , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[int]=640 , __UpperCamelCase : List[Any]=1.0 , __UpperCamelCase : Optional[Any]=0.0_2 , **__UpperCamelCase : List[Any] , ) -> Tuple:
super().__init__(**__UpperCamelCase )
if text_config is None:
A = {}
logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
if vision_config is None:
A = {}
logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
A = AlignTextConfig(**__UpperCamelCase )
A = AlignVisionConfig(**__UpperCamelCase )
A = projection_dim
A = temperature_init_value
A = initializer_range
@classmethod
def __UpperCamelCase ( cls : Any , __UpperCamelCase : AlignTextConfig , __UpperCamelCase : AlignVisionConfig , **__UpperCamelCase : Optional[int] ) -> Union[str, Any]:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> int:
A = copy.deepcopy(self.__dict__ )
A = self.text_config.to_dict()
A = self.vision_config.to_dict()
A = self.__class__.model_type
return output
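
# Hedged usage sketch (added commentary, not part of the original module). Assuming these classes
# mirror `AlignConfig`, `AlignTextConfig` and `AlignVisionConfig` from `transformers`, a combined
# config can be assembled from its two parts; the override values below are illustrative only.
#
#     text_config = AlignTextConfig(hidden_size=512)
#     vision_config = AlignVisionConfig(image_size=400)
#     config = AlignConfig.from_text_vision_configs(text_config, vision_config)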
| 224
|
def min_path_sum(grid: list) -> int:
    """Minimum path sum from the top-left to the bottom-right cell, moving only right or down."""
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])
    return current_row
if __name__ == "__main__":
import doctest
doctest.testmod()
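
# Added example (an assumption, not part of the original file): in the classic 3x3 grid the
# cheapest right/down path is 1 -> 3 -> 1 -> 1 -> 1 with total cost 7.
if __name__ == "__main__":
    print(min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]))  # 7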
| 224
| 1
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE(metaclass=DummyObject):
    """Placeholder object that raises a helpful error when torch/torchsde are not installed."""

    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 309
|
'''simple docstring'''
import operator as op
snake_case = '''scaler.pt'''
snake_case = '''pytorch_model'''
snake_case = '''random_states'''
snake_case = '''optimizer'''
snake_case = '''scheduler'''
snake_case = '''pytorch_model.bin'''
snake_case = '''pytorch_model.bin.index.json'''
snake_case = '''model.safetensors'''
snake_case = '''model.safetensors.index.json'''
snake_case = '''1.10.2'''
snake_case = '''py38'''
snake_case = '''4.17.0'''
snake_case = ['''ml.p3.16xlarge''', '''ml.p3dn.24xlarge''', '''ml.p4dn.24xlarge''']
snake_case = ['''FULL_SHARD''', '''SHARD_GRAD_OP''', '''NO_SHARD''', '''HYBRID_SHARD''', '''HYBRID_SHARD_ZERO2''']
snake_case = ['''TRANSFORMER_BASED_WRAP''', '''SIZE_BASED_WRAP''', '''NO_WRAP''']
snake_case = ['''BACKWARD_PRE''', '''BACKWARD_POST''', '''NO_PREFETCH''']
snake_case = ['''FULL_STATE_DICT''', '''LOCAL_STATE_DICT''', '''SHARDED_STATE_DICT''']
snake_case = '''2.0.1'''
snake_case = ['''pdsh''', '''standard''', '''openmpi''', '''mvapich''']
snake_case = ['''default''', '''reduce-overhead''', '''max-autotune''']
snake_case = {'''>''': op.gt, '''>=''': op.ge, '''==''': op.eq, '''!=''': op.ne, '''<=''': op.le, '''<''': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
snake_case = [
'''nnodes''',
'''nproc_per_node''',
'''rdzv_backend''',
'''rdzv_endpoint''',
'''rdzv_id''',
'''rdzv_conf''',
'''standalone''',
'''max_restarts''',
'''monitor_interval''',
'''start_method''',
'''role''',
'''module''',
'''m''',
'''no_python''',
'''run_path''',
'''log_dir''',
'''r''',
'''redirects''',
'''t''',
'''tee''',
'''node_rank''',
'''master_addr''',
'''master_port''',
]
snake_case = ['''DEEPSPEED''', '''MULTI_GPU''', '''FSDP''', '''MEGATRON_LM''']
snake_case = ['''DEEPSPEED''', '''MULTI_XPU''', '''FSDP''']
| 309
| 1
|
snake_case = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
snake_case = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
snake_case = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 488
|
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """simple docstring"""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    """simple docstring"""
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f'''The solutions are: {solution_1} and {solution_2}''')
if __name__ == "__main__":
    main()
| 488
| 1
|
'''simple docstring'''
cache = {}
def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings
def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
    print(solution())
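# Hedged sanity check for the memoised recursion above: over a 4-day period the
# number of valid prize strings is 43 (a value quoted in the Project Euler 191
# statement); the commonly cited 30-day answer is 1918080160.
def _prize_strings_demo() -> None:
    assert solution(4) == 43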
| 688
|
"""simple docstring"""
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE = """MobileNetV1Config"""
# Base docstring
SCREAMING_SNAKE_CASE = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE = [1, 1_024, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE = """google/mobilenet_v1_1.0_224"""
SCREAMING_SNAKE_CASE = """tabby, tabby cat"""
SCREAMING_SNAKE_CASE = [
"""google/mobilenet_v1_1.0_224""",
"""google/mobilenet_v1_0.75_192""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None )-> int:
"""simple docstring"""
UpperCamelCase = {}
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = model.mobilenet_va
else:
UpperCamelCase = model
UpperCamelCase = "MobilenetV1/Conv2d_0/"
UpperCamelCase = backbone.conv_stem.convolution.weight
UpperCamelCase = backbone.conv_stem.normalization.bias
UpperCamelCase = backbone.conv_stem.normalization.weight
UpperCamelCase = backbone.conv_stem.normalization.running_mean
UpperCamelCase = backbone.conv_stem.normalization.running_var
for i in range(13 ):
UpperCamelCase = i + 1
UpperCamelCase = i * 2
UpperCamelCase = backbone.layer[pt_index]
UpperCamelCase = F"MobilenetV1/Conv2d_{tf_index}_depthwise/"
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
UpperCamelCase = backbone.layer[pt_index + 1]
UpperCamelCase = F"MobilenetV1/Conv2d_{tf_index}_pointwise/"
UpperCamelCase = pointer.convolution.weight
UpperCamelCase = pointer.normalization.bias
UpperCamelCase = pointer.normalization.weight
UpperCamelCase = pointer.normalization.running_mean
UpperCamelCase = pointer.normalization.running_var
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = "MobilenetV1/Logits/Conv2d_1c_1x1/"
UpperCamelCase = model.classifier.weight
UpperCamelCase = model.classifier.bias
return tf_to_pt_map
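# Note on the index arithmetic above: HF layer 2*i holds the depthwise convolution
# and layer 2*i + 1 the pointwise convolution, both mapped to the TF scope
# "MobilenetV1/Conv2d_{i+1}_...". For example, backbone.layer[0] / backbone.layer[1]
# correspond to "MobilenetV1/Conv2d_1_depthwise/" and "MobilenetV1/Conv2d_1_pointwise/".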
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )-> List[str]:
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
"Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
"https://www.tensorflow.org/install/ for installation instructions." )
raise
# Load weights from TF model
UpperCamelCase = tf.train.list_variables(UpperCAmelCase_ )
UpperCamelCase = {}
for name, shape in init_vars:
logger.info(F"Loading TF weight {name} with shape {shape}" )
UpperCamelCase = tf.train.load_variable(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCamelCase = array
# Build TF to PyTorch weights loading map
UpperCamelCase = _build_tf_to_pytorch_map(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
for name, pointer in tf_to_pt_map.items():
logger.info(F"Importing {name}" )
if name not in tf_weights:
logger.info(F"{name} not in tf pre-trained weights, skipping" )
continue
UpperCamelCase = tf_weights[name]
if "depthwise_weights" in name:
logger.info("Transposing depthwise" )
UpperCamelCase = np.transpose(UpperCAmelCase_ , (2, 3, 0, 1) )
elif "weights" in name:
logger.info("Transposing" )
if len(pointer.shape ) == 2: # copying into linear layer
UpperCamelCase = array.squeeze().transpose()
else:
UpperCamelCase = np.transpose(UpperCAmelCase_ , (3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(F"Initialize PyTorch weight {name} {array.shape}" )
UpperCamelCase = torch.from_numpy(UpperCAmelCase_ )
tf_weights.pop(UpperCAmelCase_ , UpperCAmelCase_ )
tf_weights.pop(name + "/RMSProp" , UpperCAmelCase_ )
tf_weights.pop(name + "/RMSProp_1" , UpperCAmelCase_ )
tf_weights.pop(name + "/ExponentialMovingAverage" , UpperCAmelCase_ )
logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def lowerCamelCase__ ( UpperCAmelCase_ , UpperCAmelCase_ )-> torch.Tensor:
"""simple docstring"""
UpperCamelCase , UpperCamelCase = features.shape[-2:]
UpperCamelCase , UpperCamelCase = conv_layer.stride
UpperCamelCase , UpperCamelCase = conv_layer.kernel_size
if in_height % stride_height == 0:
UpperCamelCase = max(kernel_height - stride_height , 0 )
else:
UpperCamelCase = max(kernel_height - (in_height % stride_height) , 0 )
if in_width % stride_width == 0:
UpperCamelCase = max(kernel_width - stride_width , 0 )
else:
UpperCamelCase = max(kernel_width - (in_width % stride_width) , 0 )
UpperCamelCase = pad_along_width // 2
UpperCamelCase = pad_along_width - pad_left
UpperCamelCase = pad_along_height // 2
UpperCamelCase = pad_along_height - pad_top
UpperCamelCase = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(UpperCAmelCase_ , UpperCAmelCase_ , "constant" , 0.0 )
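# Worked example for apply_tf_padding (illustrative values, not from the original
# file): with a 224x224 input, a 3x3 kernel and stride 2, in_height % stride_height
# is 0, so pad_along_height = max(3 - 2, 0) = 1, giving pad_top = 0 and
# pad_bottom = 1 (likewise pad_left = 0, pad_right = 1), i.e. TensorFlow "SAME" padding.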
class __a ( nn.Module ):
def __init__( self : List[Any] , UpperCAmelCase_ : MobileNetVaConfig , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool or str] = True , )-> None:
"""simple docstring"""
super().__init__()
UpperCamelCase = config
if in_channels % groups != 0:
raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." )
UpperCamelCase = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
UpperCamelCase = nn.Convad(
in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=UpperCAmelCase_ , stride=UpperCAmelCase_ , padding=UpperCAmelCase_ , groups=UpperCAmelCase_ , bias=UpperCAmelCase_ , padding_mode="zeros" , )
if use_normalization:
UpperCamelCase = nn.BatchNormad(
num_features=UpperCAmelCase_ , eps=config.layer_norm_eps , momentum=0.9997 , affine=UpperCAmelCase_ , track_running_stats=UpperCAmelCase_ , )
else:
UpperCamelCase = None
if use_activation:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCamelCase = ACTaFN[use_activation]
elif isinstance(config.hidden_act , UpperCAmelCase_ ):
UpperCamelCase = ACTaFN[config.hidden_act]
else:
UpperCamelCase = config.hidden_act
else:
UpperCamelCase = None
def _SCREAMING_SNAKE_CASE ( self : Dict , UpperCAmelCase_ : torch.Tensor )-> torch.Tensor:
"""simple docstring"""
if self.config.tf_padding:
UpperCamelCase = apply_tf_padding(UpperCAmelCase_ , self.convolution )
UpperCamelCase = self.convolution(UpperCAmelCase_ )
if self.normalization is not None:
UpperCamelCase = self.normalization(UpperCAmelCase_ )
if self.activation is not None:
UpperCamelCase = self.activation(UpperCAmelCase_ )
return features
class __a ( _lowerCAmelCase ):
UpperCamelCase_ : List[str] = MobileNetVaConfig
UpperCamelCase_ : Dict = load_tf_weights_in_mobilenet_va
UpperCamelCase_ : List[str] = '''mobilenet_v1'''
UpperCamelCase_ : Optional[int] = '''pixel_values'''
UpperCamelCase_ : List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : Any , UpperCAmelCase_ : Union[nn.Linear, nn.Convad] )-> None:
"""simple docstring"""
if isinstance(UpperCAmelCase_ , (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(UpperCAmelCase_ , nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
SCREAMING_SNAKE_CASE = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
SCREAMING_SNAKE_CASE = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`MobileNetV1ImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , _lowerCAmelCase , )
class __a ( _lowerCAmelCase ):
def __init__( self : str , UpperCAmelCase_ : MobileNetVaConfig , UpperCAmelCase_ : bool = True )-> Optional[int]:
"""simple docstring"""
super().__init__(UpperCAmelCase_ )
UpperCamelCase = config
UpperCamelCase = 32
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
UpperCamelCase = MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=config.num_channels , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=2 , )
UpperCamelCase = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
UpperCamelCase = nn.ModuleList()
for i in range(13 ):
UpperCamelCase = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
UpperCamelCase = max(int(depth * config.depth_multiplier ) , config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=3 , stride=strides[i] , groups=UpperCAmelCase_ , ) )
self.layer.append(
MobileNetVaConvLayer(
UpperCAmelCase_ , in_channels=UpperCAmelCase_ , out_channels=UpperCAmelCase_ , kernel_size=1 , ) )
UpperCamelCase = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , UpperCAmelCase_ : Dict )-> List[str]:
"""simple docstring"""
raise NotImplementedError
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def _SCREAMING_SNAKE_CASE ( self : Tuple , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[bool] = None , )-> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
"""simple docstring"""
UpperCamelCase = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values" )
UpperCamelCase = self.conv_stem(UpperCAmelCase_ )
UpperCamelCase = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
UpperCamelCase = layer_module(UpperCAmelCase_ )
if output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = hidden_states
if self.pooler is not None:
UpperCamelCase = torch.flatten(self.pooler(UpperCAmelCase_ ) , start_dim=1 )
else:
UpperCamelCase = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=UpperCAmelCase_ , pooler_output=UpperCAmelCase_ , hidden_states=UpperCAmelCase_ , )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , _lowerCAmelCase , )
class __a ( _lowerCAmelCase ):
def __init__( self : str , UpperCAmelCase_ : MobileNetVaConfig )-> None:
"""simple docstring"""
super().__init__(UpperCAmelCase_ )
UpperCamelCase = config.num_labels
UpperCamelCase = MobileNetVaModel(UpperCAmelCase_ )
UpperCamelCase = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
UpperCamelCase = nn.Dropout(config.classifier_dropout_prob , inplace=UpperCAmelCase_ )
UpperCamelCase = nn.Linear(UpperCAmelCase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UpperCAmelCase_ )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=UpperCAmelCase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def _SCREAMING_SNAKE_CASE ( self : List[str] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[bool] = None , )-> Union[tuple, ImageClassifierOutputWithNoAttention]:
"""simple docstring"""
UpperCamelCase = return_dict if return_dict is not None else self.config.use_return_dict
UpperCamelCase = self.mobilenet_va(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
UpperCamelCase = outputs.pooler_output if return_dict else outputs[1]
UpperCamelCase = self.classifier(self.dropout(UpperCAmelCase_ ) )
UpperCamelCase = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
UpperCamelCase = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
UpperCamelCase = "single_label_classification"
else:
UpperCamelCase = "multi_label_classification"
if self.config.problem_type == "regression":
UpperCamelCase = MSELoss()
if self.num_labels == 1:
UpperCamelCase = loss_fct(logits.squeeze() , labels.squeeze() )
else:
UpperCamelCase = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
elif self.config.problem_type == "single_label_classification":
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
UpperCamelCase = BCEWithLogitsLoss()
UpperCamelCase = loss_fct(UpperCAmelCase_ , UpperCAmelCase_ )
if not return_dict:
UpperCamelCase = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=UpperCAmelCase_ , logits=UpperCAmelCase_ , hidden_states=outputs.hidden_states , )
| 554
| 0
|
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ :
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict=1_3 , lowerCAmelCase__ : List[str]=7 , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Tuple=True , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=True , lowerCAmelCase__ : List[Any]=9_9 , lowerCAmelCase__ : str=3_2 , lowerCAmelCase__ : Union[str, Any]=5 , lowerCAmelCase__ : Optional[int]=4 , lowerCAmelCase__ : Tuple=3_7 , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Any=0.1 , lowerCAmelCase__ : Optional[int]=5_1_2 , lowerCAmelCase__ : int=1_6 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : int=0.02 , lowerCAmelCase__ : int=3 , lowerCAmelCase__ : Dict=4 , lowerCAmelCase__ : Any=None , ) -> str:
"""simple docstring"""
_UpperCAmelCase : int = parent
_UpperCAmelCase : int = batch_size
_UpperCAmelCase : Optional[Any] = seq_length
_UpperCAmelCase : List[Any] = is_training
_UpperCAmelCase : Dict = use_input_mask
_UpperCAmelCase : Optional[int] = use_token_type_ids
_UpperCAmelCase : Tuple = use_labels
_UpperCAmelCase : int = vocab_size
_UpperCAmelCase : Dict = hidden_size
_UpperCAmelCase : Optional[int] = num_hidden_layers
_UpperCAmelCase : str = num_attention_heads
_UpperCAmelCase : str = intermediate_size
_UpperCAmelCase : Dict = hidden_act
_UpperCAmelCase : str = hidden_dropout_prob
_UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob
_UpperCAmelCase : Any = max_position_embeddings
_UpperCAmelCase : str = type_vocab_size
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : Any = initializer_range
_UpperCAmelCase : Dict = num_labels
_UpperCAmelCase : str = num_choices
_UpperCAmelCase : Optional[int] = scope
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase : Tuple = None
if self.use_input_mask:
_UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase : Union[str, Any] = None
if self.use_token_type_ids:
_UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase : Optional[Any] = None
_UpperCAmelCase : int = None
_UpperCAmelCase : List[Any] = None
if self.use_labels:
_UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCAmelCase ( self : int ) -> Any:
"""simple docstring"""
return NystromformerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = NystromformerModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase : Any = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase : Optional[int] = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase : Optional[int] = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : Dict , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] ) -> str:
"""simple docstring"""
_UpperCAmelCase : int = NystromformerForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase : List[str] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCAmelCase ( self : int , lowerCAmelCase__ : Any , lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = NystromformerForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase : Dict = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCAmelCase ( self : List[Any] , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Dict , lowerCAmelCase__ : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Any = self.num_labels
_UpperCAmelCase : Tuple = NystromformerForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase : List[str] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : int , lowerCAmelCase__ : int , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : str , lowerCAmelCase__ : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.num_labels
_UpperCAmelCase : Union[str, Any] = NystromformerForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase : Optional[int] = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : str , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : int ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : List[str] = self.num_choices
_UpperCAmelCase : Dict = NystromformerForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase : Optional[int] = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class A__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : List[str] = (
{
'''feature-extraction''': NystromformerModel,
'''fill-mask''': NystromformerForMaskedLM,
'''question-answering''': NystromformerForQuestionAnswering,
'''text-classification''': NystromformerForSequenceClassification,
'''token-classification''': NystromformerForTokenClassification,
'''zero-shot''': NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ : Union[str, Any] = False
UpperCamelCase_ : List[str] = False
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : int = NystromformerModelTester(self )
_UpperCAmelCase : str = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=3_7 )
def _lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCAmelCase ( self : List[Any] ) -> int:
"""simple docstring"""
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_UpperCAmelCase : Dict = type
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _lowerCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def _lowerCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[Any] = NystromformerModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : int = NystromformerModel.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
_UpperCAmelCase : Optional[int] = model(__UpperCamelCase )[0]
_UpperCAmelCase : Dict = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase : Dict = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
@slow
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : List[str] = "the [MASK] of Belgium is Brussels"
_UpperCAmelCase : str = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase : Optional[int] = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512" )
_UpperCAmelCase : Optional[int] = tokenizer(__UpperCamelCase , return_tensors="pt" )
with torch.no_grad():
_UpperCAmelCase : int = model(encoding.input_ids ).logits
_UpperCAmelCase : Dict = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(__UpperCamelCase ) , "capital" )
| 715
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class A__ :
"""simple docstring"""
def __init__( self : List[str] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Any=sys.maxsize ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : List[Any] = "bilinear"
_UpperCAmelCase : Union[str, Any] = max_size
_UpperCAmelCase : Tuple = short_edge_length
def __call__( self : List[str] , lowerCAmelCase__ : List[str] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = []
for img in imgs:
_UpperCAmelCase , _UpperCAmelCase : List[Any] = img.shape[:2]
# later: provide list and randomly choose index for resize
_UpperCAmelCase : List[Any] = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 )
if size == 0:
return img
_UpperCAmelCase : List[Any] = size * 1.0 / min(lowerCAmelCase__ , lowerCAmelCase__ )
if h < w:
_UpperCAmelCase , _UpperCAmelCase : str = size, scale * w
else:
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = scale * h, size
if max(lowerCAmelCase__ , lowerCAmelCase__ ) > self.max_size:
_UpperCAmelCase : List[Any] = self.max_size * 1.0 / max(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCAmelCase : Any = newh * scale
_UpperCAmelCase : str = neww * scale
_UpperCAmelCase : Union[str, Any] = int(neww + 0.5 )
_UpperCAmelCase : Optional[int] = int(newh + 0.5 )
if img.dtype == np.uinta:
_UpperCAmelCase : Optional[Any] = Image.fromarray(lowerCAmelCase__ )
_UpperCAmelCase : Any = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR )
_UpperCAmelCase : Optional[Any] = np.asarray(lowerCAmelCase__ )
else:
_UpperCAmelCase : Tuple = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw
_UpperCAmelCase : str = nn.functional.interpolate(
lowerCAmelCase__ , (newh, neww) , mode=self.interp_method , align_corners=lowerCAmelCase__ ).squeeze(0 )
img_augs.append(lowerCAmelCase__ )
return img_augs
class A__ :
"""simple docstring"""
def __init__( self : str , lowerCAmelCase__ : Optional[int] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST )
_UpperCAmelCase : str = cfg.INPUT.FORMAT
_UpperCAmelCase : List[Any] = cfg.SIZE_DIVISIBILITY
_UpperCAmelCase : int = cfg.PAD_VALUE
_UpperCAmelCase : Optional[int] = cfg.INPUT.MAX_SIZE_TEST
_UpperCAmelCase : Tuple = cfg.MODEL.DEVICE
_UpperCAmelCase : Union[str, Any] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_UpperCAmelCase : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 )
_UpperCAmelCase : Any = lambda lowerCAmelCase__ : (x - self.pixel_mean) / self.pixel_std
def _lowerCAmelCase ( self : Any , lowerCAmelCase__ : List[str] ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = tuple(max(lowerCAmelCase__ ) for s in zip(*[img.shape for img in images] ) )
_UpperCAmelCase : str = [im.shape[-2:] for im in images]
_UpperCAmelCase : Any = [
nn.functional.pad(
lowerCAmelCase__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , )
for size, im in zip(lowerCAmelCase__ , lowerCAmelCase__ )
]
return torch.stack(lowerCAmelCase__ ), torch.tensor(lowerCAmelCase__ )
def __call__( self : Optional[int] , lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any]=False ) -> str:
"""simple docstring"""
with torch.no_grad():
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCAmelCase : Any = [images]
if single_image:
assert len(lowerCAmelCase__ ) == 1
for i in range(len(lowerCAmelCase__ ) ):
if isinstance(images[i] , torch.Tensor ):
images.insert(lowerCAmelCase__ , images.pop(lowerCAmelCase__ ).to(self.device ).float() )
elif not isinstance(images[i] , torch.Tensor ):
images.insert(
lowerCAmelCase__ , torch.as_tensor(img_tensorize(images.pop(lowerCAmelCase__ ) , input_format=self.input_format ) )
.to(self.device )
.float() , )
# resize smallest edge
_UpperCAmelCase : Any = torch.tensor([im.shape[:2] for im in images] )
_UpperCAmelCase : str = self.aug(lowerCAmelCase__ )
# transpose images and convert to torch tensors
# images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
# now normalize before pad to avoid useless arithmetic
_UpperCAmelCase : int = [self.normalizer(lowerCAmelCase__ ) for x in images]
# now pad them to do the following operations
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.pad(lowerCAmelCase__ )
# Normalize
if self.size_divisibility > 0:
raise NotImplementedError()
# pad
_UpperCAmelCase : int = torch.true_divide(lowerCAmelCase__ , lowerCAmelCase__ )
if single_image:
return images[0], sizes[0], scales_yx[0]
else:
return images, sizes, scales_yx
def __UpperCAmelCase ( a_: Union[str, Any], a_: Optional[int] ):
boxes[:, 0::2] *= scale_yx[:, 1]
boxes[:, 1::2] *= scale_yx[:, 0]
return boxes
def __UpperCAmelCase ( a_: Tuple, a_: Tuple[int, int] ):
assert torch.isfinite(a_ ).all(), "Box tensor contains infinite or NaN!"
_UpperCAmelCase , _UpperCAmelCase : Tuple = box_size
tensor[:, 0].clamp_(min=0, max=a_ )
tensor[:, 1].clamp_(min=0, max=a_ )
tensor[:, 2].clamp_(min=0, max=a_ )
tensor[:, 3].clamp_(min=0, max=a_ )
| 257
| 0
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCAmelCase = logging.get_logger(__name__)
def UpperCamelCase ( a ) -> Optional[int]:
'''simple docstring'''
__magic_name__ = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
__magic_name__ = 128
elif "12-12" in model_name:
__magic_name__ = 12
__magic_name__ = 12
elif "14-14" in model_name:
__magic_name__ = 14
__magic_name__ = 14
elif "16-16" in model_name:
__magic_name__ = 16
__magic_name__ = 16
else:
raise ValueError('''Model not supported''' )
__magic_name__ = '''huggingface/label-files'''
if "speech-commands" in model_name:
__magic_name__ = 35
__magic_name__ = '''speech-commands-v2-id2label.json'''
else:
__magic_name__ = 527
__magic_name__ = '''audioset-id2label.json'''
__magic_name__ = json.load(open(hf_hub_download(a , a , repo_type='''dataset''' ) , '''r''' ) )
__magic_name__ = {int(a ): v for k, v in idalabel.items()}
__magic_name__ = idalabel
__magic_name__ = {v: k for k, v in idalabel.items()}
return config
def UpperCamelCase ( a ) -> List[Any]:
'''simple docstring'''
if "module.v" in name:
__magic_name__ = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
__magic_name__ = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
__magic_name__ = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
__magic_name__ = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
__magic_name__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
__magic_name__ = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
__magic_name__ = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
__magic_name__ = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
__magic_name__ = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
__magic_name__ = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
__magic_name__ = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
__magic_name__ = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
__magic_name__ = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
__magic_name__ = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
__magic_name__ = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
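# Illustrative trace of the renaming helper above, using a hypothetical checkpoint
# key and applying the replacement rules in order:
#   "module.v.blocks.0.attn.proj.weight"
#     -> "audio_spectrogram_transformer.encoder.layer.0.attention.output.dense.weight"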
def UpperCamelCase ( a , a ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__magic_name__ = orig_state_dict.pop(a )
if "qkv" in key:
__magic_name__ = key.split('''.''' )
__magic_name__ = int(key_split[3] )
__magic_name__ = config.hidden_size
if "weight" in key:
__magic_name__ = val[:dim, :]
__magic_name__ = val[dim : dim * 2, :]
__magic_name__ = val[-dim:, :]
else:
__magic_name__ = val[:dim]
__magic_name__ = val[dim : dim * 2]
__magic_name__ = val[-dim:]
else:
__magic_name__ = val
return orig_state_dict
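# The branch above splits a fused qkv projection: the original checkpoint stores the
# three projections stacked along dim 0 (3 * hidden_size rows), so val[:dim],
# val[dim : dim * 2] and val[-dim:] are the per-projection slices. In the upstream
# conversion script they map to query, key and value in that order (stated from
# memory of the upstream code, so treat the ordering as an assumption).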
def UpperCamelCase ( a ) -> Optional[Any]:
'''simple docstring'''
__magic_name__ = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(a , a )
@torch.no_grad()
def UpperCamelCase ( a , a , a=False ) -> List[Any]:
'''simple docstring'''
__magic_name__ = get_audio_spectrogram_transformer_config(a )
__magic_name__ = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
__magic_name__ = model_name_to_url[model_name]
__magic_name__ = torch.hub.load_state_dict_from_url(a , map_location='''cpu''' )
# remove some keys
remove_keys(a )
# rename some keys
__magic_name__ = convert_state_dict(a , a )
# load 🤗 model
__magic_name__ = ASTForAudioClassification(a )
model.eval()
model.load_state_dict(a )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
__magic_name__ = -4.2_67_73_93 if '''speech-commands''' not in model_name else -6.84_59_78
__magic_name__ = 4.5_68_99_74 if '''speech-commands''' not in model_name else 5.5_65_45_26
__magic_name__ = 1024 if '''speech-commands''' not in model_name else 128
__magic_name__ = ASTFeatureExtractor(mean=a , std=a , max_length=a )
if "speech-commands" in model_name:
__magic_name__ = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
__magic_name__ = dataset[0]['''audio''']['''array''']
else:
__magic_name__ = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
__magic_name__ , __magic_name__ = torchaudio.load(a )
__magic_name__ = waveform.squeeze().numpy()
__magic_name__ = feature_extractor(a , sampling_rate=1_6000 , return_tensors='''pt''' )
# forward pass
__magic_name__ = model(**a )
__magic_name__ = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
__magic_name__ = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
__magic_name__ = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
__magic_name__ = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
__magic_name__ = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
__magic_name__ = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
__magic_name__ = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
__magic_name__ = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
__magic_name__ = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , a , atol=1e-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(a ).mkdir(exist_ok=a )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(a )
print(F'''Saving feature extractor to {pytorch_dump_folder_path}''' )
feature_extractor.save_pretrained(a )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'''MIT/{model_name}''' )
feature_extractor.push_to_hub(F'''MIT/{model_name}''' )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
_lowerCAmelCase = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 432
|
'''simple docstring'''
_lowerCAmelCase = "\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
_lowerCAmelCase = [{"type": "code", "content": INSTALL_CONTENT}]
_lowerCAmelCase = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 432
| 1
|
"""simple docstring"""
from itertools import count
def solution(min_block_length: int = 50) -> int:
    """simple docstring"""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 100_0000:
            break
    return n
if __name__ == "__main__":
print(F'{solution() = }')
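# Hedged check for the counting function above (values quoted from the Project
# Euler 115 problem statement, so treat them as external assumptions):
# solution(min_block_length=3) should return 30 and solution(min_block_length=10)
# should return 57; the default min_block_length=50 is the problem as posed.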
| 258
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ = {
"""configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
"""BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BloomForCausalLM""",
"""BloomModel""",
"""BloomPreTrainedModel""",
"""BloomForSequenceClassification""",
"""BloomForTokenClassification""",
"""BloomForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 258
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__lowerCAmelCase = {
'''tokenizer_file''': {
'''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
},
}
__lowerCAmelCase = {
'''gpt-neox-20b''': 2_048,
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCAmelCase : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self : str ,_UpperCAmelCase : Dict=None ,_UpperCAmelCase : Any=None ,_UpperCAmelCase : int=None ,_UpperCAmelCase : str="<|endoftext|>" ,_UpperCAmelCase : List[Any]="<|endoftext|>" ,_UpperCAmelCase : Any="<|endoftext|>" ,_UpperCAmelCase : Optional[Any]=False ,**_UpperCAmelCase : Any ,):
super().__init__(
_UpperCAmelCase ,_UpperCAmelCase ,tokenizer_file=_UpperCAmelCase ,unk_token=_UpperCAmelCase ,bos_token=_UpperCAmelCase ,eos_token=_UpperCAmelCase ,add_prefix_space=_UpperCAmelCase ,**_UpperCAmelCase ,)
_a : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' ,_UpperCAmelCase ) != add_prefix_space:
_a : Optional[Any] = getattr(_UpperCAmelCase ,pre_tok_state.pop('type' ) )
_a : List[str] = add_prefix_space
_a : List[Any] = pre_tok_class(**_UpperCAmelCase )
_a : Optional[int] = add_prefix_space
def __lowercase ( self : Tuple ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[str] = None ):
_a : str = self._tokenizer.model.save(_UpperCAmelCase ,name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
def __lowercase ( self : Tuple ,_UpperCAmelCase : "Conversation" ):
_a : Optional[int] = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase ) + [self.eos_token_id] )
if len(_UpperCAmelCase ) > self.model_max_length:
_a : Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
| 358
|
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Dict:
return params[f"""{prefix}/{prefix}/relpos_bias/rel_embedding"""][:, i, :]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="attention" ) -> int:
_a : int = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/key/kernel"""][:, i, :, :] )
_a : Dict = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
_a : Any = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/out/kernel"""][:, i, :, :] )
_a : int = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
_a : List[str] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/query/kernel"""][:, i, :, :] )
_a : Dict = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
_a : List[str] = np.ascontiguousarray(params[f"""{prefix}/{prefix}/{layer_name}/value/kernel"""][:, i, :, :] )
_a : int = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False ) -> Optional[Any]:
if split_mlp_wi:
_a : Dict = params[f"""{prefix}/{prefix}/mlp/wi_0/kernel"""][:, i, :]
_a : Dict = params[f"""{prefix}/{prefix}/mlp/wi_1/kernel"""][:, i, :]
_a : Optional[int] = (wi_a, wi_a)
else:
_a : Tuple = params[f"""{prefix}/{prefix}/mlp/wi/kernel"""][:, i, :]
_a : Tuple = params[f"""{prefix}/{prefix}/mlp/wo/kernel"""][:, i, :]
return wi, wo
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
return params[f"""{prefix}/{prefix}/{layer_name}/scale"""][:, i]
def __lowerCamelCase ( lowerCAmelCase_ , *, lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False ) -> Any:
_a : Dict = traverse_util.flatten_dict(variables['target'] )
_a : Tuple = {'/'.join(lowerCAmelCase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
_a : int = 'encoder/encoder/mlp/wi_0/kernel' in old
print('Split MLP:' , lowerCAmelCase_ )
_a : str = collections.OrderedDict()
# Shared embeddings.
_a : List[str] = old['token_embedder/embedding']
# Encoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
_a : Any = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_attention_layer_norm' )
_a , _a , _a , _a : List[str] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'attention' )
_a : str = layer_norm
_a : Union[str, Any] = k.T
_a : Dict = o.T
_a : int = q.T
_a : List[str] = v.T
# Block i, layer 1 (MLP).
_a : Optional[int] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , 'pre_mlp_layer_norm' )
_a , _a : Any = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' , lowerCAmelCase_ )
_a : Optional[Any] = layer_norm
if split_mlp_wi:
_a : Tuple = wi[0].T
_a : List[str] = wi[1].T
else:
_a : Union[str, Any] = wi.T
_a : Any = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , lowerCAmelCase_ , 'encoder' ).T
_a : Optional[Any] = old['encoder/encoder_norm/scale']
if not scalable_attention:
_a : Union[str, Any] = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , 'encoder' ).T
_a : Any = tax_relpos_bias_lookup(
lowerCAmelCase_ , 0 , 'decoder' ).T
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
_a : Dict = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_self_attention_layer_norm' )
_a , _a , _a , _a : Union[str, Any] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'self_attention' )
_a : str = layer_norm
_a : List[Any] = k.T
_a : Union[str, Any] = o.T
_a : int = q.T
_a : List[Any] = v.T
# Block i, layer 1 (Cross Attention).
_a : List[Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_cross_attention_layer_norm' )
_a , _a , _a , _a : Optional[int] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'encoder_decoder_attention' )
_a : str = layer_norm
_a : Union[str, Any] = k.T
_a : Union[str, Any] = o.T
_a : Any = q.T
_a : str = v.T
# Block i, layer 2 (MLP).
_a : str = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , 'pre_mlp_layer_norm' )
_a , _a : int = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' , lowerCAmelCase_ )
_a : List[str] = layer_norm
if split_mlp_wi:
_a : List[str] = wi[0].T
_a : Union[str, Any] = wi[1].T
else:
_a : Optional[Any] = wi.T
_a : List[str] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
_a : int = tax_relpos_bias_lookup(lowerCAmelCase_ , lowerCAmelCase_ , 'decoder' ).T
_a : List[Any] = old['decoder/decoder_norm/scale']
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
_a : List[Any] = old['decoder/logits_dense/kernel'].T
return new
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_a : List[str] = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
_a : Tuple = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
_a : Union[str, Any] = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print('Using shared word embeddings as lm_head.' )
_a : Optional[Any] = state_dict['shared.weight']
return state_dict
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
_a : Any = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
_a : str = convert_tax_to_pytorch(
lowerCAmelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase_ , scalable_attention=lowerCAmelCase_ )
_a : Dict = make_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = False , lowerCAmelCase_ = False , ) -> str:
_a : int = MTaConfig.from_json_file(lowerCAmelCase_ )
print(f"""Building PyTorch model from configuration: {config}""" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
_a : Any = UMTaEncoderModel(lowerCAmelCase_ )
else:
_a : Optional[int] = UMTaForConditionalGeneration(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(lowerCAmelCase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase_ )
print('Done' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser(description='''Converts a native T5X checkpoint into a PyTorch checkpoint.''')
# Required parameters
parser.add_argument(
'''--t5x_checkpoint_path''', default=None, type=str, required=True, help='''Path to the T5X checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--is_encoder_only''', action='''store_true''', help='''Check if the model is encoder-decoder model''', default=False
)
parser.add_argument(
'''--scalable_attention''',
action='''store_true''',
help='''Whether the model uses scaled attention (umt5 model)''',
default=False,
)
__lowerCAmelCase = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
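For reference, a minimal sketch of how the converter above would be invoked from the command line. The flag names are taken directly from the argparse definitions above; the script filename and all paths are placeholders, not values from this document.

# Hypothetical invocation (script name and paths are placeholders):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_dump \
#       --scalable_attention
#
# Omit --scalable_attention for plain T5/mT5 checkpoints; add --is_encoder_only
# for encoder-only checkpoints, exactly as wired up in the parser above.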
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowercase ( UpperCamelCase_ ):
def __init__( self :str , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Optional[Any] ) -> List[str]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = dataset
__SCREAMING_SNAKE_CASE : Optional[Any] = process
__SCREAMING_SNAKE_CASE : Union[str, Any] = params
def __len__( self :Tuple ) -> Any:
return len(self.dataset )
def __getitem__( self :List[str] , lowerCAmelCase__ :Optional[Any] ) -> int:
__SCREAMING_SNAKE_CASE : List[Any] = self.dataset[i]
__SCREAMING_SNAKE_CASE : Optional[int] = self.process(UpperCamelCase__ , **self.params )
return processed
class _lowercase ( UpperCamelCase_ ):
def __init__( self :Dict , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :List[Any]=None ) -> str:
__SCREAMING_SNAKE_CASE : List[Any] = loader
__SCREAMING_SNAKE_CASE : List[Any] = infer
__SCREAMING_SNAKE_CASE : int = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : List[str] = loader_batch_size
# Internal bookkeeping
__SCREAMING_SNAKE_CASE : Optional[int] = None
__SCREAMING_SNAKE_CASE : int = None
def __len__( self :int ) -> str:
return len(self.loader )
def __iter__( self :List[str] ) -> int:
__SCREAMING_SNAKE_CASE : Optional[int] = iter(self.loader )
return self
def __magic_name__( self :List[str] ) -> int:
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__SCREAMING_SNAKE_CASE : Optional[Any] = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Convert ModelOutput to tuple first
__SCREAMING_SNAKE_CASE : Dict = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
__SCREAMING_SNAKE_CASE : Dict = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase__ , UpperCamelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
__SCREAMING_SNAKE_CASE : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
__SCREAMING_SNAKE_CASE : List[str] = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__SCREAMING_SNAKE_CASE : int = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__SCREAMING_SNAKE_CASE : Union[str, Any] = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__SCREAMING_SNAKE_CASE : Tuple = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__SCREAMING_SNAKE_CASE : Tuple = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
__SCREAMING_SNAKE_CASE : Any = self._loader_batch_data.__class__(UpperCamelCase__ )
self._loader_batch_index += 1
return result
def __magic_name__( self :Union[str, Any] ) -> List[str]:
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__SCREAMING_SNAKE_CASE : Tuple = next(self.iterator )
__SCREAMING_SNAKE_CASE : List[Any] = self.infer(UpperCamelCase__ , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCamelCase__ , torch.Tensor ):
__SCREAMING_SNAKE_CASE : Optional[int] = processed
else:
__SCREAMING_SNAKE_CASE : int = list(processed.keys() )[0]
__SCREAMING_SNAKE_CASE : int = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
__SCREAMING_SNAKE_CASE : Dict = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__SCREAMING_SNAKE_CASE : List[Any] = observed_batch_size
# Setting internal index to unwrap the batch
__SCREAMING_SNAKE_CASE : List[Any] = processed
__SCREAMING_SNAKE_CASE : int = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _lowercase ( UpperCamelCase_ ):
def __init__( self :str , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any]=None ) -> Any:
super().__init__(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def __iter__( self :Union[str, Any] ) -> Tuple:
__SCREAMING_SNAKE_CASE : Tuple = iter(self.loader )
__SCREAMING_SNAKE_CASE : List[Any] = None
return self
def __magic_name__( self :List[str] ) -> Optional[Any]:
if self.subiterator is None:
__SCREAMING_SNAKE_CASE : Dict = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
__SCREAMING_SNAKE_CASE : Any = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__SCREAMING_SNAKE_CASE : Optional[Any] = self.infer(next(self.iterator ) , **self.params )
__SCREAMING_SNAKE_CASE : Union[str, Any] = next(self.subiterator )
return processed
class _lowercase ( UpperCamelCase_ ):
def __iter__( self :Tuple ) -> str:
__SCREAMING_SNAKE_CASE : Dict = iter(self.loader )
return self
def __magic_name__( self :List[str] ) -> Optional[int]:
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : Optional[int] = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__SCREAMING_SNAKE_CASE : Tuple = self.loader_batch_item()
__SCREAMING_SNAKE_CASE : Any = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
while not is_last:
__SCREAMING_SNAKE_CASE : Any = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCamelCase__ , torch.Tensor ):
__SCREAMING_SNAKE_CASE : Tuple = processed
else:
__SCREAMING_SNAKE_CASE : Union[str, Any] = list(processed.keys() )[0]
__SCREAMING_SNAKE_CASE : List[str] = processed[key]
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
__SCREAMING_SNAKE_CASE : List[Any] = len(UpperCamelCase__ )
else:
__SCREAMING_SNAKE_CASE : int = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__SCREAMING_SNAKE_CASE : List[str] = observed_batch_size
__SCREAMING_SNAKE_CASE : List[Any] = processed
__SCREAMING_SNAKE_CASE : str = 0
while self._loader_batch_index < self.loader_batch_size:
__SCREAMING_SNAKE_CASE : Any = self.loader_batch_item()
__SCREAMING_SNAKE_CASE : List[Any] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
if is_last:
return accumulator
else:
__SCREAMING_SNAKE_CASE : int = processed
__SCREAMING_SNAKE_CASE : List[str] = item.pop('''is_last''' )
accumulator.append(UpperCamelCase__ )
return accumulator
class _lowercase ( UpperCamelCase_ ):
def __init__( self :Optional[Any] , lowerCAmelCase__ :Dataset , lowerCAmelCase__ :str ) -> List[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = dataset
__SCREAMING_SNAKE_CASE : Dict = key
def __len__( self :Optional[int] ) -> List[Any]:
return len(self.dataset )
def __getitem__( self :Dict , lowerCAmelCase__ :Tuple ) -> Any:
return self.dataset[i][self.key]
class _lowercase ( UpperCamelCase_ ):
def __init__( self :List[Any] , lowerCAmelCase__ :Dataset , lowerCAmelCase__ :str , lowerCAmelCase__ :str ) -> Dict:
__SCREAMING_SNAKE_CASE : str = dataset
__SCREAMING_SNAKE_CASE : List[str] = keya
__SCREAMING_SNAKE_CASE : Tuple = keya
def __len__( self :List[str] ) -> int:
return len(self.dataset )
def __getitem__( self :Union[str, Any] , lowerCAmelCase__ :Any ) -> List[str]:
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class _lowercase ( A__ ):
'''simple docstring'''
def __init__( self :List[str] , lowerCAmelCase__ :TransformeraDModel , lowerCAmelCase__ :AutoencoderKL , lowerCAmelCase__ :KarrasDiffusionSchedulers , lowerCAmelCase__ :Optional[Dict[int, str]] = None , ) -> List[Any]:
super().__init__()
self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
        # create an imagenet label -> id dictionary for easier use
__SCREAMING_SNAKE_CASE : Dict = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split(''',''' ):
__SCREAMING_SNAKE_CASE : int = int(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[int] = dict(sorted(self.labels.items() ) )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Union[str, List[str]] ) -> List[int]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = list(lowerCAmelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
f'''{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.''' )
return [self.labels[l] for l in label]
@torch.no_grad()
def __call__( self :Tuple , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :float = 4.0 , lowerCAmelCase__ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCAmelCase__ :int = 50 , lowerCAmelCase__ :Optional[str] = "pil" , lowerCAmelCase__ :bool = True , ) -> Union[ImagePipelineOutput, Tuple]:
__SCREAMING_SNAKE_CASE : Any = len(lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.transformer.config.sample_size
__SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer.config.in_channels
__SCREAMING_SNAKE_CASE : Union[str, Any] = randn_tensor(
shape=(batch_size, latent_channels, latent_size, latent_size) , generator=lowerCAmelCase__ , device=self.device , dtype=self.transformer.dtype , )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([latents] * 2 ) if guidance_scale > 1 else latents
__SCREAMING_SNAKE_CASE : str = torch.tensor(lowerCAmelCase__ , device=self.device ).reshape(-1 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([1_000] * batch_size , device=self.device )
__SCREAMING_SNAKE_CASE : Tuple = torch.cat([class_labels, class_null] , 0 ) if guidance_scale > 1 else class_labels
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input[: len(lowerCAmelCase__ ) // 2]
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([half, half] , dim=0 )
__SCREAMING_SNAKE_CASE : Dict = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = t
if not torch.is_tensor(lowerCAmelCase__ ):
# TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
# This would be a good case for the `match` statement (Python 3.10+)
__SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.device.type == '''mps'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
__SCREAMING_SNAKE_CASE : Tuple = torch.floataa if is_mps else torch.floataa
else:
__SCREAMING_SNAKE_CASE : List[Any] = torch.intaa if is_mps else torch.intaa
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor([timesteps] , dtype=lowerCAmelCase__ , device=latent_model_input.device )
elif len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE : str = timesteps[None].to(latent_model_input.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
__SCREAMING_SNAKE_CASE : str = timesteps.expand(latent_model_input.shape[0] )
# predict noise model_output
__SCREAMING_SNAKE_CASE : Optional[Any] = self.transformer(
lowerCAmelCase__ , timestep=lowerCAmelCase__ , class_labels=lowerCAmelCase__ ).sample
# perform guidance
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[str] = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = torch.split(lowerCAmelCase__ , len(lowerCAmelCase__ ) // 2 , dim=0 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
__SCREAMING_SNAKE_CASE : Any = torch.cat([half_eps, half_eps] , dim=0 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([eps, rest] , dim=1 )
# learned sigma
if self.transformer.config.out_channels // 2 == latent_channels:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = torch.split(lowerCAmelCase__ , lowerCAmelCase__ , dim=1 )
else:
__SCREAMING_SNAKE_CASE : Optional[Any] = noise_pred
# compute previous image: x_t -> x_t-1
__SCREAMING_SNAKE_CASE : Tuple = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample
if guidance_scale > 1:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[int] = latent_model_input.chunk(2 , dim=0 )
else:
__SCREAMING_SNAKE_CASE : List[str] = latent_model_input
__SCREAMING_SNAKE_CASE : Dict = 1 / self.vae.config.scaling_factor * latents
__SCREAMING_SNAKE_CASE : Tuple = self.vae.decode(lowerCAmelCase__ ).sample
__SCREAMING_SNAKE_CASE : int = (samples / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE : Dict = samples.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE : str = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (samples,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
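The pipeline above is the DiT class-conditional image pipeline: it maps human-readable ImageNet label strings to class ids, samples latents, applies classifier-free guidance when guidance_scale > 1, and decodes with the VAE. A hedged usage sketch, assuming the publicly released diffusers DiTPipeline and the facebook/DiT-XL-2-256 checkpoint; neither is named in the code above, so treat both as assumptions.

# Hedged sketch -- assumes diffusers' DiTPipeline API and an external checkpoint.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

class_ids = pipe.get_label_ids(["white shark", "umbrella"])   # label string -> ImageNet id
images = pipe(class_labels=class_ids, guidance_scale=4.0, num_inference_steps=25).images
images[0].save("dit_sample.png")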
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=UpperCamelCase__ ):
a : int = ["""torch""", """scipy"""]
def __init__( self , *A , **A ) -> str:
'''simple docstring'''
requires_backends(self , ["torch", "scipy"] )
@classmethod
def lowerCAmelCase_ ( cls , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def lowerCAmelCase_ ( cls , *A , **A ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
import logging
import numpy as np
import pytest
from scipy.linalg import eigh
logging.basicConfig(level=logging.INFO, format="%(message)s")
def SCREAMING_SNAKE_CASE ( __UpperCamelCase) -> np.ndarray:
return input_array.reshape((input_array.size, 1))
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> np.ndarray:
a = np.nan
for i in range(__UpperCamelCase):
a = features[:, labels == i]
a = data.mean(1)
# Centralize the data of class i
a = data - column_reshape(__UpperCamelCase)
if i > 0:
# If covariance_sum is not None
covariance_sum += np.dot(__UpperCamelCase , centered_data.T)
else:
# If covariance_sum is np.nan (i.e. first loop)
a = np.dot(__UpperCamelCase , centered_data.T)
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> np.ndarray:
a = features.mean(1)
a = np.nan
for i in range(__UpperCamelCase):
a = features[:, labels == i]
a = data.shape[1]
a = data.mean(1)
if i > 0:
# If covariance_sum is not None
covariance_sum += device_data * np.dot(
column_reshape(__UpperCamelCase) - column_reshape(__UpperCamelCase) , (column_reshape(__UpperCamelCase) - column_reshape(__UpperCamelCase)).T , )
else:
# If covariance_sum is np.nan (i.e. first loop)
a = device_data * np.dot(
column_reshape(__UpperCamelCase) - column_reshape(__UpperCamelCase) , (column_reshape(__UpperCamelCase) - column_reshape(__UpperCamelCase)).T , )
return covariance_sum / features.shape[1]
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase) -> np.ndarray:
# Check if the features have been loaded
if features.any():
a = features.mean(1)
# Center the dataset
a = features - np.reshape(__UpperCamelCase , (data_mean.size, 1))
a = np.dot(__UpperCamelCase , centered_data.T) / features.shape[1]
a , a = np.linalg.eigh(__UpperCamelCase)
        # Take all the columns in reverse order (-1), then keep only the first `dimensions` columns
a = eigenvectors[:, ::-1][:, 0:dimensions]
# Project the database on the new space
a = np.dot(filtered_eigenvectors.T , __UpperCamelCase)
logging.info("Principal Component Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__UpperCamelCase)
logging.error("Dataset empty")
raise AssertionError
def SCREAMING_SNAKE_CASE ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase) -> np.ndarray:
assert classes > dimensions
# Check if features have been already loaded
if features.any:
a , a = eigh(
covariance_between_classes(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) , covariance_within_classes(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase) , )
a = eigenvectors[:, ::-1][:, :dimensions]
a , a , a = np.linalg.svd(__UpperCamelCase)
a = svd_matrix[:, 0:dimensions]
a = np.dot(filtered_svd_matrix.T , __UpperCamelCase)
logging.info("Linear Discriminant Analysis computed")
return projected_data
else:
logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=__UpperCamelCase)
logging.error("Dataset empty")
raise AssertionError
def SCREAMING_SNAKE_CASE ( ) -> None:
# Create dummy dataset with 2 classes and 3 features
a = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]])
a = np.array([0, 0, 0, 1, 1])
a = 2
a = 2
# Assert that the function raises an AssertionError if dimensions > classes
with pytest.raises(__UpperCamelCase) as error_info:
a = linear_discriminant_analysis(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase)
if isinstance(__UpperCamelCase , np.ndarray):
raise AssertionError(
"Did not raise AssertionError for dimensions > classes")
assert error_info.type is AssertionError
def SCREAMING_SNAKE_CASE ( ) -> None:
a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
a = 2
a = np.array([[6.92_820_323, 8.66_025_404, 10.39_230_485], [3.0, 3.0, 3.0]])
with pytest.raises(__UpperCamelCase) as error_info:
a = principal_component_analysis(__UpperCamelCase , __UpperCamelCase)
if not np.allclose(__UpperCamelCase , __UpperCamelCase):
raise AssertionError
assert error_info.type is AssertionError
if __name__ == "__main__":
import doctest
doctest.testmod()
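The PCA routine above centers the features, forms the covariance matrix, takes the top eigenvectors, and projects the data onto them. Because the function names in this copy have been mangled, here is a self-contained numpy sketch of that core computation with illustrative data (rows are features, columns are samples):

import numpy as np

features = np.array([[1.0, 2.0, 3.0, 4.0],
                     [2.0, 4.0, 6.0, 8.0],
                     [0.5, 1.0, 1.5, 2.0]])   # shape (n_features, n_samples)
k = 2                                          # target dimensionality

centered = features - features.mean(axis=1, keepdims=True)
covariance = centered @ centered.T / features.shape[1]
eigenvalues, eigenvectors = np.linalg.eigh(covariance)   # ascending eigenvalues
top_k = eigenvectors[:, ::-1][:, :k]                      # keep the k largest
projected = top_k.T @ features                            # shape (k, n_samples)
print(projected.shape)                                    # (2, 4)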
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : str = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = "lilt"
def __init__( self , __UpperCamelCase=3_0522 , __UpperCamelCase=768 , __UpperCamelCase=12 , __UpperCamelCase=12 , __UpperCamelCase=3072 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=512 , __UpperCamelCase=2 , __UpperCamelCase=0.0_2 , __UpperCamelCase=1E-12 , __UpperCamelCase=0 , __UpperCamelCase="absolute" , __UpperCamelCase=None , __UpperCamelCase=4 , __UpperCamelCase=1024 , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
__a : Optional[Any] = vocab_size
__a : Union[str, Any] = hidden_size
__a : Optional[int] = num_hidden_layers
__a : Tuple = num_attention_heads
__a : int = hidden_act
__a : int = intermediate_size
__a : Optional[int] = hidden_dropout_prob
__a : int = attention_probs_dropout_prob
__a : str = max_position_embeddings
__a : Optional[int] = type_vocab_size
__a : Union[str, Any] = initializer_range
__a : str = layer_norm_eps
__a : List[Any] = position_embedding_type
__a : str = classifier_dropout
__a : Optional[Any] = channel_shrink_ratio
__a : List[str] = max_ad_position_embeddings
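The configuration above corresponds to LiLT (the language-independent layout transformer). A hedged sketch of instantiating it, assuming the upstream transformers LiltConfig/LiltModel classes are available; the class in this copy has an obfuscated name, so the import below is an assumption rather than something stated here.

# Hedged sketch -- assumes transformers ships LiltConfig/LiltModel.
from transformers import LiltConfig, LiltModel

config = LiltConfig(hidden_size=768, num_hidden_layers=12, channel_shrink_ratio=4)
model = LiltModel(config)
print(config.channel_shrink_ratio)   # 4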
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase ):
lowercase__ = ""
lowercase__ = "hf-legacy" # "hf://"" is reserved for hffs
def __init__( self , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , ):
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
__a : int = repo_info
__a : int = token
__a : Any = None
def __lowerCamelCase ( self ):
'''simple docstring'''
if self.dir_cache is None:
__a : Union[str, Any] = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
__a : List[str] = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase = "rb" , **__UpperCamelCase , ):
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
__a : Any = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def __lowerCamelCase ( self , __UpperCamelCase , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : str = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def __lowerCamelCase ( self , __UpperCamelCase , __UpperCamelCase=False , **__UpperCamelCase ):
'''simple docstring'''
self._get_dirs()
__a : int = PurePosixPath(path.strip("""/""" ) )
__a : List[str] = {}
for p, f in self.dir_cache.items():
__a : str = PurePosixPath(p.strip("""/""" ) )
__a : Optional[int] = p.parent
if root == path:
__a : List[str] = f
__a : str = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = ''''''
lowerCAmelCase_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
lowerCAmelCase_ = None # compression type in fsspec. ex: "gzip"
    lowerCAmelCase_ = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self : str , _A : str = "" , _A : Optional[str] = None , _A : Optional[dict] = None , **_A : List[Any] ):
"""simple docstring"""
super().__init__(self , **_A )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__SCREAMING_SNAKE_CASE : Tuple = fsspec.open(
_A , mode='''rb''' , protocol=_A , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__SCREAMING_SNAKE_CASE : Optional[Any] = os.path.basename(self.file.path.split('''::''' )[0] )
__SCREAMING_SNAKE_CASE : Optional[int] = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
__SCREAMING_SNAKE_CASE : Any = None
@classmethod
def UpperCAmelCase__ ( cls : List[str] , _A : Optional[int] ):
"""simple docstring"""
return super()._strip_protocol(_A ).lstrip('''/''' )
def UpperCAmelCase__ ( self : List[Any] ):
"""simple docstring"""
if self.dir_cache is None:
__SCREAMING_SNAKE_CASE : List[str] = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
__SCREAMING_SNAKE_CASE : Union[str, Any] = {f['''name''']: f}
def UpperCAmelCase__ ( self : Dict , _A : str ):
"""simple docstring"""
return self.file.open().read()
def UpperCAmelCase__ ( self : Any , _A : str , _A : str = "rb" , _A : Tuple=None , _A : Union[str, Any]=True , _A : Dict=None , **_A : List[Any] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : str = self._strip_protocol(_A )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''bz2'''
lowerCAmelCase_ = '''bz2'''
lowerCAmelCase_ = '''.bz2'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''gzip'''
lowerCAmelCase_ = '''gzip'''
lowerCAmelCase_ = '''.gz'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''lz4'''
lowerCAmelCase_ = '''lz4'''
lowerCAmelCase_ = '''.lz4'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''xz'''
lowerCAmelCase_ = '''xz'''
lowerCAmelCase_ = '''.xz'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''zstd'''
lowerCAmelCase_ = '''zstd'''
lowerCAmelCase_ = '''.zst'''
def __init__( self : str , _A : str , _A : str = "rb" , _A : Optional[str] = None , _A : Optional[dict] = None , _A : int = DEFAULT_BLOCK_SIZE , **_A : str , ):
"""simple docstring"""
super().__init__(
fo=_A , mode=_A , target_protocol=_A , target_options=_A , block_size=_A , **_A , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__SCREAMING_SNAKE_CASE : Optional[Any] = self.file.__enter__
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : int , _A : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = file_
def __enter__( self : int ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : Any , *_A : int , **_A : List[Any] ):
"""simple docstring"""
self._file.__exit__(*_A , **_A )
def __iter__( self : List[Any] ):
"""simple docstring"""
return iter(self._file )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
return next(self._file )
def __getattr__( self : Dict , _A : Optional[int] ):
"""simple docstring"""
return getattr(self._file , _A )
def fixed_enter(*_A : List[str] , **_A : str ):
return WrappedFile(_enter(*_A , **_A ) )
__SCREAMING_SNAKE_CASE : Optional[int] = fixed_enter
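The filesystems above expose bz2/gzip/lz4/xz/zstd archives through fsspec protocols, always reading the single inner file in binary mode. A minimal, self-contained sketch of the plain-fsspec behaviour they build on; the file created here is a throwaway placeholder:

import gzip
import fsspec

with gzip.open("example.txt.gz", "wt") as f:         # create a small gzip file to read back
    f.write("hello compressed world\n")

with fsspec.open("example.txt.gz", mode="rt", compression="gzip") as f:
    print(f.read())                                   # "hello compressed world"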
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase_ = MobileBertTokenizer
lowerCAmelCase_ = MobileBertTokenizerFast
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = filter_non_english
lowerCAmelCase_ = '''google/mobilebert-uncased'''
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
super().setUp()
__SCREAMING_SNAKE_CASE : List[str] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__SCREAMING_SNAKE_CASE : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE : int = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def UpperCAmelCase__ ( self : Tuple , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : List[str] = '''unwanted, running'''
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class(self.vocab_file )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_A , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
__SCREAMING_SNAKE_CASE : List[str] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Optional[Any] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : str = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : str = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Any = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
# With lower casing
__SCREAMING_SNAKE_CASE : Any = self.get_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = self.get_rust_tokenizer(do_lower_case=_A )
__SCREAMING_SNAKE_CASE : List[str] = '''UNwant\u00E9d,running'''
__SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : List[str] = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : int = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE : Any = tokenizer.encode(_A )
__SCREAMING_SNAKE_CASE : Optional[int] = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def UpperCAmelCase__ ( self : Optional[Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = BasicTokenizer(do_lower_case=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Tuple = BasicTokenizer(do_lower_case=_A , strip_accents=_A )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = BasicTokenizer(do_lower_case=_A , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
__SCREAMING_SNAKE_CASE : Dict = {}
for i, token in enumerate(_A ):
__SCREAMING_SNAKE_CASE : List[str] = i
__SCREAMING_SNAKE_CASE : str = WordpieceTokenizer(vocab=_A , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def UpperCAmelCase__ ( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Dict = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(_A ) for t in ['''Test''', '''\xad''', '''test''']] , [['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def UpperCAmelCase__ ( self : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class.from_pretrained('''google/mobilebert-uncased''' )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer.encode('''sequence builders''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(_A )
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_A , _A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : Optional[Any] = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : str = F'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = tokenizer_r.encode_plus(
_A , return_attention_mask=_A , return_token_type_ids=_A , return_offsets_mapping=_A , add_special_tokens=_A , )
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer_r.do_lower_case if hasattr(_A , '''do_lower_case''' ) else False
__SCREAMING_SNAKE_CASE : Optional[Any] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''Allen'''),
((21, 23), '''##NL'''),
((23, 24), '''##P'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 15), tokenizer_r.mask_token),
((16, 21), '''allen'''),
((21, 23), '''##nl'''),
((23, 24), '''##p'''),
((25, 33), '''sentence'''),
((33, 34), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['''offset_mapping'''] )
def UpperCAmelCase__ ( self : str ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = ['''的''', '''人''', '''有''']
__SCREAMING_SNAKE_CASE : int = ''''''.join(_A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__SCREAMING_SNAKE_CASE : str = True
__SCREAMING_SNAKE_CASE : int = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : int = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Tuple = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
__SCREAMING_SNAKE_CASE : Optional[Any] = False
__SCREAMING_SNAKE_CASE : Any = self.rust_tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[str] = self.tokenizer_class.from_pretrained(_A , **_A )
__SCREAMING_SNAKE_CASE : List[Any] = tokenizer_r.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.encode(_A , add_special_tokens=_A )
__SCREAMING_SNAKE_CASE : Dict = tokenizer_r.convert_ids_to_tokens(_A )
__SCREAMING_SNAKE_CASE : int = tokenizer_p.convert_ids_to_tokens(_A )
# it is expected that only the first Chinese character is not preceded by "##".
__SCREAMING_SNAKE_CASE : List[Any] = [
F'''##{token}''' if idx != 0 else token for idx, token in enumerate(_A )
]
self.assertListEqual(_A , _A )
self.assertListEqual(_A , _A )
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = [
'no_inference',
'no_cuda',
'no_tpu',
'no_speed',
'no_memory',
'no_env_print',
'no_multi_process',
]
def __init__( self , **__UpperCAmelCase ):
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =deprecated_arg[3:]
SCREAMING_SNAKE_CASE_ : str =not kwargs.pop(__UpperCAmelCase )
logger.warning(
F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
F""" {positive_arg}={kwargs[positive_arg]}""" )
SCREAMING_SNAKE_CASE_ : List[Any] =kwargs.pop('tpu_name' , self.tpu_name )
SCREAMING_SNAKE_CASE_ : Optional[Any] =kwargs.pop('device_idx' , self.device_idx )
SCREAMING_SNAKE_CASE_ : str =kwargs.pop('eager_mode' , self.eager_mode )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =kwargs.pop('use_xla' , self.use_xla )
super().__init__(**__UpperCAmelCase )
_lowercase = field(
default=__A , metadata={'help': 'Name of TPU'} , )
_lowercase = field(
default=0 , metadata={'help': 'CPU / GPU device index. Defaults to 0.'} , )
_lowercase = field(default=__A , metadata={'help': 'Benchmark models in eager model.'} )
_lowercase = field(
default=__A , metadata={
'help': 'Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`.'
} , )
@cached_property
def __lowerCamelCase ( self ):
requires_backends(self , ['tf'] )
SCREAMING_SNAKE_CASE_ : Any =None
if self.tpu:
try:
if self.tpu_name:
SCREAMING_SNAKE_CASE_ : Optional[int] =tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
SCREAMING_SNAKE_CASE_ : int =tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
SCREAMING_SNAKE_CASE_ : int =None
return tpu
@cached_property
def __lowerCamelCase ( self ):
requires_backends(self , ['tf'] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
SCREAMING_SNAKE_CASE_ : str =tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , 'GPU' )
SCREAMING_SNAKE_CASE_ : Optional[int] =tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , 'GPU' ) # disable GPU
SCREAMING_SNAKE_CASE_ : Any =tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" )
return strategy
@property
def __lowerCamelCase ( self ):
requires_backends(self , ['tf'] )
return self._setup_tpu is not None
@property
def __lowerCamelCase ( self ):
requires_backends(self , ['tf'] )
return self._setup_strategy
@property
def __lowerCamelCase ( self ):
requires_backends(self , ['tf'] )
return tf.config.list_physical_devices('GPU' )
@property
def __lowerCamelCase ( self ):
requires_backends(self , ['tf'] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __lowerCamelCase ( self ):
return self.n_gpu > 0
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = ['pixel_values']
def __init__( self , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = True , __UpperCAmelCase = None , __UpperCAmelCase = True , __UpperCAmelCase = 1 / 255 , __UpperCAmelCase = True , __UpperCAmelCase = IMAGENET_DEFAULT_MEAN , __UpperCAmelCase = IMAGENET_DEFAULT_STD , **__UpperCAmelCase , ):
super().__init__(**__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple =size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ : List[Any] =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : List[str] =crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Union[str, Any] =get_size_dict(__UpperCAmelCase , param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : Tuple =do_resize
SCREAMING_SNAKE_CASE_ : Dict =size
SCREAMING_SNAKE_CASE_ : Tuple =resample
SCREAMING_SNAKE_CASE_ : List[str] =do_center_crop
SCREAMING_SNAKE_CASE_ : Optional[int] =crop_size
SCREAMING_SNAKE_CASE_ : int =do_rescale
SCREAMING_SNAKE_CASE_ : List[Any] =rescale_factor
SCREAMING_SNAKE_CASE_ : Any =do_normalize
SCREAMING_SNAKE_CASE_ : Tuple =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE_ : Tuple =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = PILImageResampling.BICUBIC , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Optional[Any] =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_ : List[str] =int((256 / 224) * size['shortest_edge'] )
SCREAMING_SNAKE_CASE_ : Optional[Any] =get_resize_output_image_size(__UpperCAmelCase , size=__UpperCAmelCase , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Tuple ={'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
__UpperCAmelCase , size=(size_dict['height'], size_dict['width']) , resample=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : List[Any] =get_size_dict(__UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(__UpperCAmelCase , size=(size['height'], size['width']) , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return rescale(__UpperCAmelCase , scale=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , **__UpperCAmelCase , ):
return normalize(__UpperCAmelCase , mean=__UpperCAmelCase , std=__UpperCAmelCase , data_format=__UpperCAmelCase , **__UpperCAmelCase )
def __lowerCamelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = ChannelDimension.FIRST , **__UpperCAmelCase , ):
SCREAMING_SNAKE_CASE_ : Optional[int] =do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : List[str] =resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Tuple =do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Union[str, Any] =do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Tuple =rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Tuple =do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : int =image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : List[Any] =image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : List[str] =size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Any =get_size_dict(__UpperCAmelCase , default_to_square=__UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Optional[Any] =crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Optional[Any] =get_size_dict(__UpperCAmelCase , param_name='crop_size' )
SCREAMING_SNAKE_CASE_ : Optional[Any] =make_list_of_images(__UpperCAmelCase )
if not valid_images(__UpperCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : Any =[to_numpy_array(__UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Dict =[self.resize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Any =[self.center_crop(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : List[Any] =[self.rescale(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : List[str] =[self.normalize(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple =[to_channel_dimension_format(__UpperCAmelCase , __UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Tuple ={'pixel_values': images}
return BatchFeature(data=__UpperCAmelCase , tensor_type=__UpperCAmelCase )
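The preprocess method above follows a fixed resize -> center-crop -> rescale -> normalize flow with a (256 -> 224) shortest-edge convention. A short sketch of the same flow on a dummy image, reusing the transformers.image_transforms helpers already imported at the top of this file; the mean/std values below are the usual ImageNet defaults, supplied here as an assumption:

import numpy as np
from transformers.image_transforms import center_crop, normalize, rescale, resize

image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)   # HWC dummy image
image = resize(image, size=(256, 256))
image = center_crop(image, size=(224, 224))
image = rescale(image, scale=1 / 255)
image = normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
print(image.shape)   # (224, 224, 3)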
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class a_ :
UpperCamelCase_ : Any = XGLMConfig
UpperCamelCase_ : int = {}
UpperCamelCase_ : Tuple = "gelu"
def __init__( self : Optional[int] , snake_case__ : Tuple , snake_case__ : List[str]=14 , snake_case__ : Union[str, Any]=7 , snake_case__ : Optional[Any]=True , snake_case__ : Optional[int]=True , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=99 , snake_case__ : Optional[Any]=32 , snake_case__ : List[Any]=2 , snake_case__ : Optional[Any]=4 , snake_case__ : List[str]=37 , snake_case__ : Optional[Any]="gelu" , snake_case__ : str=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[Any]=512 , snake_case__ : List[Any]=0.02 , ):
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = seq_length
lowerCAmelCase__ = is_training
lowerCAmelCase__ = use_input_mask
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = vocab_size
lowerCAmelCase__ = d_model
lowerCAmelCase__ = num_hidden_layers
lowerCAmelCase__ = num_attention_heads
lowerCAmelCase__ = ffn_dim
lowerCAmelCase__ = activation_function
lowerCAmelCase__ = activation_dropout
lowerCAmelCase__ = attention_dropout
lowerCAmelCase__ = max_position_embeddings
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = None
lowerCAmelCase__ = 0
lowerCAmelCase__ = 2
lowerCAmelCase__ = 1
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
return XGLMConfig.from_pretrained("""facebook/xglm-564M""" )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
lowerCAmelCase__ = tf.clip_by_value(
ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) , clip_value_min=0 , clip_value_max=3 )
lowerCAmelCase__ = None
if self.use_input_mask:
lowerCAmelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ = self.get_config()
lowerCAmelCase__ = floats_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
input_mask,
head_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Dict ):
return XGLMConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , num_layers=self.num_hidden_layers , attention_heads=self.num_attention_heads , ffn_dim=self.ffn_dim , activation_function=self.activation_function , activation_dropout=self.activation_dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , use_cache=snake_case__ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , return_dict=snake_case__ , )
def _SCREAMING_SNAKE_CASE ( self : str ):
lowerCAmelCase__ = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) = config_and_inputs
lowerCAmelCase__ = {
"""input_ids""": input_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_tf
class a_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase_ : str = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
UpperCamelCase_ : Union[str, Any] = (TFXGLMForCausalLM,) if is_tf_available() else ()
UpperCamelCase_ : Tuple = (
{"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
)
UpperCamelCase_ : List[Any] = False
UpperCamelCase_ : Optional[Any] = False
UpperCamelCase_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
lowerCAmelCase__ = TFXGLMModelTester(self )
lowerCAmelCase__ = ConfigTester(self , config_class=snake_case__ , n_embd=37 )
def _SCREAMING_SNAKE_CASE ( self : Dict ):
self.config_tester.run_common_tests()
@slow
def _SCREAMING_SNAKE_CASE ( self : int ):
for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = TFXGLMModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
@unittest.skip(reason="""Currently, model embeddings are going to undergo a major refactor.""" )
def _SCREAMING_SNAKE_CASE ( self : str ):
super().test_resize_token_embeddings()
@require_tf
class a_ ( unittest.TestCase ):
@slow
def _SCREAMING_SNAKE_CASE ( self : int , snake_case__ : Optional[int]=True ):
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = tf.convert_to_tensor([[2, 268, 9865]] , dtype=tf.intaa ) # The dog
# </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
# fmt: off
lowerCAmelCase__ = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
# fmt: on
lowerCAmelCase__ = model.generate(snake_case__ , do_sample=snake_case__ , num_beams=1 )
if verify_outputs:
self.assertListEqual(output_ids[0].numpy().tolist() , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
lowerCAmelCase__ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
tf.random.set_seed(0 )
lowerCAmelCase__ = tokenizer("""Today is a nice day and""" , return_tensors="""tf""" )
lowerCAmelCase__ = tokenized.input_ids
# forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
with tf.device(""":/CPU:0""" ):
lowerCAmelCase__ = model.generate(snake_case__ , do_sample=snake_case__ , seed=[7, 0] )
lowerCAmelCase__ = tokenizer.decode(output_ids[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = (
"""Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"""
)
self.assertEqual(snake_case__ , snake_case__ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
lowerCAmelCase__ = TFXGLMForCausalLM.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
lowerCAmelCase__ = """left"""
# use different length sentences to test batching
lowerCAmelCase__ = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When""",
"""Hello, my dog is a little""",
]
lowerCAmelCase__ = tokenizer(snake_case__ , return_tensors="""tf""" , padding=snake_case__ )
lowerCAmelCase__ = inputs["""input_ids"""]
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , attention_mask=inputs["""attention_mask"""] , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
lowerCAmelCase__ = model.generate(input_ids=snake_case__ , max_new_tokens=12 )
lowerCAmelCase__ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
lowerCAmelCase__ = [
"""This is an extremelly long sentence that only exists to test the ability of the model to cope with """
"""left-padding, such as in batched generation. The output for the sequence below should be the same """
"""regardless of whether left padding is applied or not. When left padding is applied, the sequence will be """
"""a single""",
"""Hello, my dog is a little bit of a shy one, but he is very friendly""",
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
| 644
|
"""simple docstring"""
def reverse_long_words(sentence: str) -> str:
    """
    Reverse each word that is longer than four characters.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words("Hey wollef sroirraw"))
| 644
| 1
|
def is_pentagonal(n: int) -> bool:
    """Return True if n is a pentagonal number, i.e. n = k(3k - 1)/2 for some integer k."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5_000) -> int:
    """Find a pair of pentagonal numbers whose sum and difference are both pentagonal
    and return their difference (Project Euler 44)."""
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f"""{solution() = }""")
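    # Sanity check of the helper above (added assertion, run only as a script):
    # the first pentagonal numbers are 1, 5, 12, 22, 35, while 4 is not pentagonal.
    assert all(is_pentagonal(p) for p in (1, 5, 12, 22, 35)) and not is_pentagonal(4)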
| 717
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class A__ ( ProcessorMixin ):
"""simple docstring"""
_lowercase = 'MCTCTFeatureExtractor'
_lowercase = 'AutoTokenizer'
def __init__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ):
super().__init__(lowerCamelCase__ , lowerCamelCase__ )
a__ : int = self.feature_extractor
a__ : Any = False
def __call__( self : str , *lowerCamelCase__ : Optional[Any] , **lowerCamelCase__ : List[Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*lowerCamelCase__ , **lowerCamelCase__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
a__ : Optional[Any] = kwargs.pop("raw_speech" )
else:
a__ : int = kwargs.pop("audio" , lowerCamelCase__ )
a__ : Dict = kwargs.pop("sampling_rate" , lowerCamelCase__ )
a__ : List[str] = kwargs.pop("text" , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
a__ : Optional[int] = args[0]
a__ : List[str] = args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
a__ : Dict = self.feature_extractor(lowerCamelCase__ , *lowerCamelCase__ , sampling_rate=lowerCamelCase__ , **lowerCamelCase__ )
if text is not None:
a__ : str = self.tokenizer(lowerCamelCase__ , **lowerCamelCase__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
a__ : Union[str, Any] = encodings["input_ids"]
return inputs
def _UpperCamelCase( self : str , *lowerCamelCase__ : Dict , **lowerCamelCase__ : str ):
return self.tokenizer.batch_decode(*lowerCamelCase__ , **lowerCamelCase__ )
def _UpperCamelCase( self : str , *lowerCamelCase__ : List[Any] , **lowerCamelCase__ : Optional[Any] ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCamelCase__ , **lowerCamelCase__ )
a__ : Any = kwargs.pop("input_features" , lowerCamelCase__ )
a__ : Any = kwargs.pop("labels" , lowerCamelCase__ )
if len(lowerCamelCase__ ) > 0:
a__ : Optional[int] = args[0]
a__ : Any = args[1:]
if input_features is not None:
a__ : str = self.feature_extractor.pad(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
if labels is not None:
a__ : Dict = self.tokenizer.pad(lowerCamelCase__ , **lowerCamelCase__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
a__ : List[str] = labels["input_ids"]
return input_features
def _UpperCamelCase( self : List[Any] , *lowerCamelCase__ : Optional[int] , **lowerCamelCase__ : Any ):
return self.tokenizer.decode(*lowerCamelCase__ , **lowerCamelCase__ )
@contextmanager
def _UpperCamelCase( self : Optional[Any] ):
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
a__ : str = True
a__ : List[Any] = self.tokenizer
yield
a__ : Any = self.feature_extractor
a__ : List[str] = False
| 151
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class __UpperCamelCase ( PretrainedConfig ):
"""simple docstring"""
lowerCAmelCase_ = '''funnel'''
lowerCAmelCase_ = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''n_head''',
}
def __init__( self : Dict , _A : Any=3_0522 , _A : Tuple=[4, 4, 4] , _A : Optional[Any]=None , _A : int=2 , _A : Any=768 , _A : str=12 , _A : Any=64 , _A : Union[str, Any]=3072 , _A : Any="gelu_new" , _A : List[Any]=0.1 , _A : List[Any]=0.1 , _A : List[Any]=0.0 , _A : int=0.1 , _A : Optional[int]=None , _A : Tuple=1e-9 , _A : Optional[Any]="mean" , _A : Dict="relative_shift" , _A : int=True , _A : List[str]=True , _A : List[Any]=True , **_A : List[Any] , ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = vocab_size
__SCREAMING_SNAKE_CASE : Dict = block_sizes
__SCREAMING_SNAKE_CASE : Optional[Any] = [1] * len(_A ) if block_repeats is None else block_repeats
assert len(_A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
__SCREAMING_SNAKE_CASE : Union[str, Any] = num_decoder_layers
__SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
__SCREAMING_SNAKE_CASE : int = n_head
__SCREAMING_SNAKE_CASE : int = d_head
__SCREAMING_SNAKE_CASE : Dict = d_inner
__SCREAMING_SNAKE_CASE : Any = hidden_act
__SCREAMING_SNAKE_CASE : Any = hidden_dropout
__SCREAMING_SNAKE_CASE : List[str] = attention_dropout
__SCREAMING_SNAKE_CASE : Union[str, Any] = activation_dropout
__SCREAMING_SNAKE_CASE : List[Any] = initializer_range
__SCREAMING_SNAKE_CASE : List[Any] = initializer_std
__SCREAMING_SNAKE_CASE : Union[str, Any] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], F'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
__SCREAMING_SNAKE_CASE : Optional[Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], F'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
__SCREAMING_SNAKE_CASE : int = attention_type
__SCREAMING_SNAKE_CASE : Dict = separate_cls
__SCREAMING_SNAKE_CASE : Optional[int] = truncate_seq
__SCREAMING_SNAKE_CASE : Any = pool_q_only
super().__init__(**_A )
@property
def UpperCAmelCase__ ( self : Any ):
"""simple docstring"""
return sum(self.block_sizes )
@num_hidden_layers.setter
def UpperCAmelCase__ ( self : Dict , _A : List[Any] ):
"""simple docstring"""
raise NotImplementedError(
'''This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.''' )
@property
def UpperCAmelCase__ ( self : Tuple ):
"""simple docstring"""
return len(self.block_sizes )
@num_blocks.setter
def UpperCAmelCase__ ( self : Optional[int] , _A : Optional[Any] ):
"""simple docstring"""
raise NotImplementedError('''This model does not support the setting of `num_blocks`. Please set `block_sizes`.''' )
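A short sketch of how the two derived properties above behave; it uses the upstream FunnelConfig that this file mirrors, so importing it from transformers is an assumption about the environment.
def _funnel_config_demo():
    from transformers import FunnelConfig

    config = FunnelConfig(block_sizes=[4, 4, 4], block_repeats=[1, 2, 2])
    assert config.num_hidden_layers == 12  # sum(block_sizes)
    assert config.num_blocks == 3          # len(block_sizes)
    return config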
| 74
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase : Optional[Any] = abspath(join(dirname(dirname(dirname(__file__))), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(SCREAMING_SNAKE_CASE__ )
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
from transformers.testing_utils import pytest_terminal_summary_main
lowercase : Dict = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(SCREAMING_SNAKE_CASE__ , id=SCREAMING_SNAKE_CASE__ )
| 336
| 0
|
import os
def solution(filename: str = "matrix.txt") -> int:
    """Minimal path sum from top-left to bottom-right of the grid in `filename`,
    moving only right or down (Project Euler 81)."""
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for _ in range(n)] for _ in range(n)]
    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]
if __name__ == "__main__":
print(F'{solution() = }')
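A self-contained check of the same right/down dynamic programme on a tiny in-memory grid, so no matrix.txt is needed; on this grid the cheapest path (1 -> 3 -> 1 -> 1 -> 1) costs 7.
def _min_path_sum(grid: list[list[int]]) -> int:
    # Identical recurrence to solution(), but the grid is passed in directly.
    rows, cols = len(grid), len(grid[0])
    dp = [[0] * cols for _ in range(rows)]
    dp[0][0] = grid[0][0]
    for j in range(1, cols):
        dp[0][j] = grid[0][j] + dp[0][j - 1]
    for i in range(1, rows):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    for i in range(1, rows):
        for j in range(1, cols):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])
    return dp[-1][-1]


assert _min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]]) == 7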
| 709
|
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
snake_case__ : Optional[int] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ (PerceiverImageProcessor ):
'''simple docstring'''
def __init__( self : Any , *__a : Union[str, Any] , **__a : List[str] ) ->None:
warnings.warn(
"""The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use PerceiverImageProcessor instead.""" , __a , )
super().__init__(*__a , **__a )
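The same deprecate-by-subclassing pattern in miniature; the class names below are made up for illustration and are not part of transformers.
import warnings


class NewImageProcessor:
    """Stand-in for the replacement class."""


class OldFeatureExtractor(NewImageProcessor):
    """Stand-in for the deprecated alias: warn once, then behave like the new class."""

    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)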
| 171
| 0
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
lowerCAmelCase = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
inspect_dataset(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = path + '''.py'''
assert script_name in os.listdir(SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
inspect_metric(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
lowercase__ = path + '''.py'''
assert script_name in os.listdir(SCREAMING_SNAKE_CASE )
assert "__pycache__" not in os.listdir(SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = get_dataset_config_info(SCREAMING_SNAKE_CASE , config_name=SCREAMING_SNAKE_CASE )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with pytest.raises(SCREAMING_SNAKE_CASE ):
get_dataset_config_info(SCREAMING_SNAKE_CASE , config_name=SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = get_dataset_config_names(SCREAMING_SNAKE_CASE )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = get_dataset_infos(SCREAMING_SNAKE_CASE )
assert list(infos.keys() ) == expected_configs
lowercase__ = expected_configs[0]
assert expected_config in infos
lowercase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowercase__ = get_dataset_infos(SCREAMING_SNAKE_CASE )
assert expected_config in infos
lowercase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def _a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
"""simple docstring"""
with pytest.raises(SCREAMING_SNAKE_CASE ):
get_dataset_split_names(SCREAMING_SNAKE_CASE , config_name=SCREAMING_SNAKE_CASE )
| 43
|
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a ( PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase : Any = KandinskyImgaImgPipeline
UpperCamelCase : int = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""]
UpperCamelCase : Optional[Any] = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
UpperCamelCase : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCamelCase : List[str] = False
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return 32
@property
def __snake_case ( self ):
return self.time_input_dim
@property
def __snake_case ( self ):
return self.time_input_dim * 4
@property
def __snake_case ( self ):
return 100
@property
def __snake_case ( self ):
UpperCAmelCase__ : str = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[int] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , )
UpperCAmelCase__ : Tuple = MultilingualCLIP(UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
UpperCAmelCase__ : Optional[Any] = UNetaDConditionModel(**UpperCamelCase_ )
return model
@property
def __snake_case ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __snake_case ( self ):
torch.manual_seed(0 )
UpperCAmelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def __snake_case ( self ):
UpperCAmelCase__ : Union[str, Any] = self.dummy_text_encoder
UpperCAmelCase__ : int = self.dummy_tokenizer
UpperCAmelCase__ : int = self.dummy_unet
UpperCAmelCase__ : Optional[Any] = self.dummy_movq
UpperCAmelCase__ : Any = {
'num_train_timesteps': 1_000,
'beta_schedule': 'linear',
'beta_start': 0.00085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
UpperCAmelCase__ : List[Any] = DDIMScheduler(**UpperCamelCase_ )
UpperCAmelCase__ : Tuple = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=0 ):
UpperCAmelCase__ : int = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : Optional[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase_ )
# create init_image
UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase_ ) ).to(UpperCamelCase_ )
UpperCAmelCase__ : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase__ : Union[str, Any] = Image.fromarray(np.uinta(UpperCamelCase_ ) ).convert('RGB' ).resize((256, 256) )
if str(UpperCamelCase_ ).startswith('mps' ):
UpperCAmelCase__ : Dict = torch.manual_seed(UpperCamelCase_ )
else:
UpperCAmelCase__ : Optional[int] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
UpperCAmelCase__ : Tuple = {
'prompt': 'horse',
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'num_inference_steps': 10,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def __snake_case ( self ):
UpperCAmelCase__ : Optional[int] = 'cpu'
UpperCAmelCase__ : Any = self.get_dummy_components()
UpperCAmelCase__ : Any = self.pipeline_class(**UpperCamelCase_ )
UpperCAmelCase__ : Tuple = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
UpperCAmelCase__ : Optional[int] = output.images
UpperCAmelCase__ : Dict = pipe(
**self.get_dummy_inputs(UpperCamelCase_ ) , return_dict=UpperCamelCase_ , )[0]
UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1]
UpperCAmelCase__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
UpperCAmelCase__ : Any = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a ( unittest.TestCase ):
def __snake_case ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __snake_case ( self ):
UpperCAmelCase__ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_img2img_frog.npy' )
UpperCAmelCase__ : List[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
UpperCAmelCase__ : Any = 'A red cartoon frog, 4k'
UpperCAmelCase__ : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior' , torch_dtype=torch.floataa )
pipe_prior.to(UpperCamelCase_ )
UpperCAmelCase__ : Any = KandinskyImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1' , torch_dtype=torch.floataa )
UpperCAmelCase__ : Optional[Any] = pipeline.to(UpperCamelCase_ )
pipeline.set_progress_bar_config(disable=UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = pipe_prior(
UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
UpperCAmelCase__ : Optional[Any] = pipeline(
UpperCamelCase_ , image=UpperCamelCase_ , image_embeds=UpperCamelCase_ , negative_image_embeds=UpperCamelCase_ , generator=UpperCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type='np' , )
UpperCAmelCase__ : Union[str, Any] = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 110
| 0
|
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of `plain_text`."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
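A minimal sanity check for the checksum above; the reference value for "Wikipedia" is the published Adler-32 example, and zlib's built-in implementation serves as a cross-check.
if __name__ == "__main__":
    import zlib

    assert adler32("Wikipedia") == 0x11E60398 == zlib.adler32(b"Wikipedia")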
| 629
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _A ( lowerCamelCase ):
a__ : List[str] = []
if isinstance(lowerCamelCase , lowerCamelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(lowerCamelCase ) )
elif isinstance(lowerCamelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError("Not supported" )
return shapes
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase ):
a__ : List[str] = []
for d in reversed(lowerCamelCase ):
idx.append(flat_idx % d )
a__ : Union[str, Any] = flat_idx // d
return tuple(reversed(lowerCamelCase ) )
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = None , lowerCamelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(lowerCamelCase ) -> None:
a__ : int = True
for i in range(len(lowerCamelCase ) ):
a__ : Optional[Any] = -1 * (i + 1)
l[reversed_idx] &= tally
a__ : Tuple = l[reversed_idx]
if start_edges is None:
a__ : Optional[int] = [s == 0 for s in start]
reduce_edge_list(lowerCamelCase )
if end_edges is None:
a__ : Union[str, Any] = [e == (d - 1) for e, d in zip(lowerCamelCase , lowerCamelCase )]
reduce_edge_list(lowerCamelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowerCamelCase ) == 0:
return [()]
elif len(lowerCamelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
a__ : List[Tuple[slice, ...]] = []
a__ : List[slice] = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowerCamelCase , lowerCamelCase ):
if s == e:
path_list.append(slice(lowerCamelCase , s + 1 ) )
else:
break
a__ : Tuple[slice, ...] = tuple(lowerCamelCase )
a__ : Optional[Any] = len(lowerCamelCase )
# start == end, and we're done
if divergence_idx == len(lowerCamelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : Optional[Any] = start[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
a__ : List[str] = end[divergence_idx]
return tuple(
path + (slice(lowerCamelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
a__ : Optional[int] = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
a__ : Optional[int] = t.shape[:no_batch_dims]
a__ : List[str] = list(_flat_idx_to_idx(lowerCamelCase , lowerCamelCase ) )
# _get_minimal_slice_set is inclusive
a__ : Dict = list(_flat_idx_to_idx(flat_end - 1 , lowerCamelCase ) )
# Get an ordered list of slices to perform
a__ : str = _get_minimal_slice_set(
lowerCamelCase , lowerCamelCase , lowerCamelCase , )
a__ : Any = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _A ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase = False , lowerCamelCase = None , lowerCamelCase = False , ):
if not (len(lowerCamelCase ) > 0):
raise ValueError("Must provide at least one input" )
a__ : str = [shape[:no_batch_dims] for shape in _fetch_dims(lowerCamelCase )]
a__ : Dict = tuple([max(lowerCamelCase ) for s in zip(*lowerCamelCase )] )
def _prep_inputs(lowerCamelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
a__ : Any = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
a__ : Optional[Any] = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
a__ : Dict = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
a__ : Dict[str, Any] = tensor_tree_map(_prep_inputs , lowerCamelCase )
a__ : str = None
if _out is not None:
a__ : Optional[int] = tensor_tree_map(lambda lowerCamelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
a__ : Optional[Any] = 1
for d in orig_batch_dims:
flat_batch_dim *= d
a__ : Tuple = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowerCamelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
a__ : str = 0
a__ : Any = prepped_outputs
for _ in range(lowerCamelCase ):
# Chunk the input
if not low_mem:
a__ : str = _select_chunk
else:
a__ : Tuple = partial(
_chunk_slice , flat_start=lowerCamelCase , flat_end=min(lowerCamelCase , i + chunk_size ) , no_batch_dims=len(lowerCamelCase ) , )
a__ : Dict[str, Any] = tensor_tree_map(lowerCamelCase , lowerCamelCase )
# Run the layer on the chunk
a__ : Any = layer(**lowerCamelCase )
# Allocate space for the output
if out is None:
a__ : Optional[Any] = tensor_tree_map(lambda lowerCamelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowerCamelCase )
# Put the chunk in its pre-allocated space
if isinstance(lowerCamelCase , lowerCamelCase ):
def assign(lowerCamelCase , lowerCamelCase ) -> None:
for k, v in da.items():
if isinstance(lowerCamelCase , lowerCamelCase ):
assign(lowerCamelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
a__ : Dict = da[k]
assign(lowerCamelCase , lowerCamelCase )
elif isinstance(lowerCamelCase , lowerCamelCase ):
for xa, xa in zip(lowerCamelCase , lowerCamelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
a__ : Dict = xa
elif isinstance(lowerCamelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
a__ : Dict = output_chunk
else:
raise ValueError("Not supported" )
i += chunk_size
a__ : Any = tensor_tree_map(lambda lowerCamelCase : t.view(orig_batch_dims + t.shape[1:] ) , lowerCamelCase )
return out
class __lowerCAmelCase :
def __init__( self , snake_case = 512 , ) -> List[str]:
"""simple docstring"""
a__ : int = max_chunk_size
a__ : Optional[int] = None
a__ : Optional[tuple] = None
def _snake_case ( self , snake_case , snake_case , snake_case ) -> int:
"""simple docstring"""
logging.info("Tuning chunk size..." )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
a__ : List[int] = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
a__ : List[str] = [c for c in candidates if c > min_chunk_size]
a__ : Optional[int] = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case , chunk_size=snake_case )
return True
except RuntimeError:
return False
a__ : Union[str, Any] = 0
a__ : Dict = len(snake_case ) - 1
while i > min_viable_chunk_size_index:
a__ : Any = test_chunk_size(candidates[i] )
if not viable:
a__ : List[Any] = (min_viable_chunk_size_index + i) // 2
else:
a__ : Tuple = i
a__ : Any = (i + len(snake_case ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _snake_case ( self , snake_case , snake_case ) -> bool:
"""simple docstring"""
a__ : str = True
for aa, aa in zip(snake_case , snake_case ):
assert type(snake_case ) == type(snake_case )
if isinstance(snake_case , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case , snake_case )
elif isinstance(snake_case , snake_case ):
a__ : Union[str, Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
a__ : List[Any] = [v for _, v in sorted(aa.items() , key=lambda snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case , snake_case )
else:
consistent &= aa == aa
return consistent
def _snake_case ( self , snake_case , snake_case , snake_case , ) -> int:
"""simple docstring"""
a__ : List[Any] = True
a__ : tuple = tree_map(lambda snake_case : a.shape if isinstance(snake_case , torch.Tensor ) else a , snake_case , snake_case )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case )
a__ : Union[str, Any] = self._compare_arg_caches(self.cached_arg_data , snake_case )
else:
# Otherwise, we can reuse the precomputed value
a__ : Optional[int] = False
if not consistent:
a__ : List[str] = self._determine_favorable_chunk_size(
snake_case , snake_case , snake_case , )
a__ : List[str] = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
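The core idea of chunk_layer above, reduced to a toy helper (an illustration only, not the OpenFold API): apply a function to slices of the leading batch dimension and stitch the pieces back together.
def _toy_chunk(fn, x: torch.Tensor, chunk_size: int) -> torch.Tensor:
    # Apply fn to consecutive slices of the flattened batch dimension, then concatenate.
    pieces = [fn(x[i : i + chunk_size]) for i in range(0, x.shape[0], chunk_size)]
    return torch.cat(pieces, dim=0)


_x = torch.arange(10.0).unsqueeze(-1)
assert torch.equal(_toy_chunk(lambda t: t * 2, _x, chunk_size=4), _x * 2)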
| 629
| 1
|
"""simple docstring"""
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return the alphabetically sorted letters of `word` (its anagram signature)."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word from the word list that shares `my_word`'s signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
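A self-contained illustration of the signature/anagram grouping that does not depend on words.txt:
if __name__ == "__main__":
    _sample = ["listen", "silent", "enlist", "google"]
    _groups = collections.defaultdict(list)
    for _w in _sample:
        _groups["".join(sorted(_w))].append(_w)
    assert sorted(_groups["".join(sorted("listen"))]) == ["enlist", "listen", "silent"]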
| 139
|
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class lowerCamelCase__ ( SchedulerMixin , ConfigMixin ):
"""simple docstring"""
__a = 1
@register_to_config
def __init__( self : Dict , UpperCamelCase : int = 1_000 , UpperCamelCase : Optional[Union[np.ndarray, List[float]]] = None ):
'''simple docstring'''
self.set_timesteps(UpperCamelCase )
# standard deviation of the initial noise distribution
__UpperCAmelCase : str = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__UpperCAmelCase : int = 4
# running values
__UpperCAmelCase : Union[str, Any] = []
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : Union[str, torch.device] = None ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = num_inference_steps
__UpperCAmelCase : Tuple = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__UpperCAmelCase : Union[str, Any] = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__UpperCAmelCase : Dict = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__UpperCAmelCase : int = torch.sin(steps * math.pi / 2 ) ** 2
__UpperCAmelCase : Union[str, Any] = (1.0 - self.betas**2) ** 0.5
__UpperCAmelCase : Optional[Any] = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__UpperCAmelCase : Any = timesteps.to(UpperCamelCase )
__UpperCAmelCase : List[Any] = []
def lowerCamelCase__ ( self : Dict , UpperCamelCase : torch.FloatTensor , UpperCamelCase : int , UpperCamelCase : torch.FloatTensor , UpperCamelCase : bool = True , ):
'''simple docstring'''
if self.num_inference_steps is None:
raise ValueError(
"""Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler""" )
__UpperCAmelCase : List[Any] = (self.timesteps == timestep).nonzero().item()
__UpperCAmelCase : List[str] = timestep_index + 1
__UpperCAmelCase : Optional[Any] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(UpperCamelCase )
if len(self.ets ) == 1:
__UpperCAmelCase : List[Any] = self.ets[-1]
elif len(self.ets ) == 2:
__UpperCAmelCase : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__UpperCAmelCase : Tuple = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__UpperCAmelCase : int = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__UpperCAmelCase : int = self._get_prev_sample(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCamelCase )
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : torch.FloatTensor , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : List[str] ):
'''simple docstring'''
return sample
def lowerCamelCase__ ( self : Dict , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] ):
'''simple docstring'''
__UpperCAmelCase : Dict = self.alphas[timestep_index]
__UpperCAmelCase : List[str] = self.betas[timestep_index]
__UpperCAmelCase : List[str] = self.alphas[prev_timestep_index]
__UpperCAmelCase : Tuple = self.betas[prev_timestep_index]
__UpperCAmelCase : Dict = (sample - sigma * ets) / max(UpperCamelCase , 1e-8 )
__UpperCAmelCase : Union[str, Any] = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self : Tuple ):
'''simple docstring'''
return self.config.num_train_timesteps
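The four-term history combination in step() uses the standard fourth-order Adams-Bashforth weights (55, -59, 37, -9) divided by 24; as a small consistency check, the weights sum to the divisor, so they average to one.
_AB4_WEIGHTS = (55, -59, 37, -9)
assert sum(_AB4_WEIGHTS) == 24  # divided by 24, the weights sum to 1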
| 139
| 1
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__lowercase : Any = logging.get_logger(__name__)
__lowercase : Dict = '''▁'''
__lowercase : Optional[Any] = {'''vocab_file''': '''spiece.model'''}
__lowercase : str = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
__lowercase : Optional[int] = {
'''google/reformer-crime-and-punishment''': 5_2_4_2_8_8,
}
class lowerCAmelCase ( PreTrainedTokenizer ):
"""simple docstring"""
__lowercase :List[Any] = VOCAB_FILES_NAMES
__lowercase :Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowercase :Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowercase :Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self , UpperCamelCase__ , UpperCamelCase__="</s>" , UpperCamelCase__="<unk>" , UpperCamelCase__=[] , UpperCamelCase__ = None , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
lowerCamelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__SCREAMING_SNAKE_CASE , unk_token=__SCREAMING_SNAKE_CASE , additional_special_tokens=__SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **__SCREAMING_SNAKE_CASE , )
lowerCamelCase_ = vocab_file
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__SCREAMING_SNAKE_CASE )
@property
def _lowerCAmelCase ( self ) -> Tuple:
'''simple docstring'''
return self.sp_model.get_piece_size()
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = {self.convert_ids_to_tokens(__SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> str:
'''simple docstring'''
lowerCamelCase_ = self.__dict__.copy()
lowerCamelCase_ = None
return state
def __setstate__( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
lowerCamelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCamelCase_ = {}
lowerCamelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.sp_model.encode(__SCREAMING_SNAKE_CASE , out_type=__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Optional[int]:
'''simple docstring'''
return self.sp_model.piece_to_id(__SCREAMING_SNAKE_CASE )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if index < self.sp_model.get_piece_size():
lowerCamelCase_ = self.sp_model.IdToPiece(__SCREAMING_SNAKE_CASE )
return token
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> int:
'''simple docstring'''
lowerCamelCase_ = []
lowerCamelCase_ = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE ) + token
lowerCamelCase_ = []
else:
current_sub_tokens.append(__SCREAMING_SNAKE_CASE )
out_string += self.sp_model.decode(__SCREAMING_SNAKE_CASE )
return out_string.strip()
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
'''simple docstring'''
if not os.path.isdir(__SCREAMING_SNAKE_CASE ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
__SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(__SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCamelCase_ = self.sp_model.serialized_model_proto()
fi.write(__SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
| 716
|
"""simple docstring"""
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCAmelCase ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
__lowercase :Tuple = FlaxAutoencoderKL
@property
def _lowerCAmelCase ( self ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase_ = 4
lowerCamelCase_ = 3
lowerCamelCase_ = (32, 32)
lowerCamelCase_ = jax.random.PRNGKey(0 )
lowerCamelCase_ = jax.random.uniform(UpperCamelCase__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _lowerCAmelCase ( self ) -> int:
'''simple docstring'''
lowerCamelCase_ = {
'''block_out_channels''': [32, 64],
'''in_channels''': 3,
'''out_channels''': 3,
'''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
'''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
'''latent_channels''': 4,
}
lowerCamelCase_ = self.dummy_input
return init_dict, inputs_dict
| 66
| 0
|
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of 1..n."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
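    # Small-case check from the problem statement (added assertion, run only as a script):
    # (1 + ... + 10) ** 2 - (1 ** 2 + ... + 10 ** 2) = 3025 - 385 = 2640
    assert solution(10) == 2640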
| 407
|
'''simple docstring'''
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Any = int(UpperCamelCase__ )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
return f'''{h}:{m:02d}:{s:02d}''' if h != 0 else f'''{m:02d}:{s:02d}'''
def _UpperCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=3_0_0 ):
# docstyle-ignore
return f'''
<div>
{prefix}
<progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
{label}
</div>
'''
def _UpperCamelCase ( UpperCamelCase__ ):
UpperCAmelCase__ : Dict = """<table border=\"1\" class=\"dataframe\">\n"""
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += f''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
UpperCAmelCase__ : Any = f'''{elt:.6f}''' if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else str(UpperCamelCase__ )
html_code += f''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
class NotebookProgressBar :
lowerCAmelCase :int = 5
lowerCAmelCase :List[str] = 0.2
def __init__( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = True , _lowerCamelCase = None , _lowerCamelCase = 300 , ):
UpperCAmelCase__ : List[Any] = total
UpperCAmelCase__ : Optional[int] = """""" if prefix is None else prefix
UpperCAmelCase__ : Optional[int] = leave
UpperCAmelCase__ : List[str] = parent
UpperCAmelCase__ : Optional[Any] = width
UpperCAmelCase__ : int = None
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : List[Any] = None
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase = False , _lowerCamelCase = None):
UpperCAmelCase__ : List[Any] = value
if comment is not None:
UpperCAmelCase__ : List[str] = comment
if self.last_value is None:
UpperCAmelCase__ : Any = time.time()
UpperCAmelCase__ : List[Any] = value
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : List[Any] = self.warmup
UpperCAmelCase__ : Any = 1
self.update_bar(_lowerCamelCase)
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total):
if self.first_calls > 0:
self.first_calls -= 1
UpperCAmelCase__ : str = time.time()
UpperCAmelCase__ : List[str] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
UpperCAmelCase__ : List[str] = self.elapsed_time / (value - self.start_value)
else:
UpperCAmelCase__ : Optional[Any] = None
if value >= self.total:
UpperCAmelCase__ : Union[str, Any] = self.total
UpperCAmelCase__ : Tuple = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
UpperCAmelCase__ : str = self.average_time_per_item * (self.total - value)
self.update_bar(_lowerCamelCase)
UpperCAmelCase__ : str = value
UpperCAmelCase__ : str = current_time
if self.average_time_per_item is None:
UpperCAmelCase__ : Tuple = 1
else:
UpperCAmelCase__ : Optional[Any] = max(int(self.update_every / self.average_time_per_item) , 1)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None):
UpperCAmelCase__ : Tuple = """ """ * (len(str(self.total)) - len(str(_lowerCamelCase))) + str(_lowerCamelCase)
if self.elapsed_time is None:
UpperCAmelCase__ : str = f'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
UpperCAmelCase__ : List[Any] = f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)}'''
else:
UpperCAmelCase__ : str = (
f'''[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <'''
f''' {format_time(self.predicted_remaining)}'''
)
self.label += f''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment) == 0 else f''', {self.comment}]'''
self.display()
def snake_case__ ( self):
UpperCAmelCase__ : int = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
UpperCAmelCase__ : Optional[Any] = disp.display(disp.HTML(self.html_code) , display_id=_lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def snake_case__ ( self):
if self.parent is None and self.output is not None:
self.output.update(disp.HTML(""""""))
class NotebookTrainingTracker ( NotebookProgressBar ):
def __init__( self , _lowerCamelCase , _lowerCamelCase=None):
super().__init__(_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = None if column_names is None else [column_names]
UpperCAmelCase__ : List[str] = None
def snake_case__ ( self):
UpperCAmelCase__ : List[str] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width)
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table)
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
UpperCAmelCase__ : Any = disp.display(disp.HTML(self.html_code) , display_id=_lowerCamelCase)
else:
self.output.update(disp.HTML(self.html_code))
def snake_case__ ( self , _lowerCamelCase):
if self.inner_table is None:
UpperCAmelCase__ : Any = [list(values.keys()), list(values.values())]
else:
UpperCAmelCase__ : int = self.inner_table[0]
if len(self.inner_table) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(_lowerCamelCase)
UpperCAmelCase__ : str = columns
self.inner_table.append([values[c] for c in columns])
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase=None , _lowerCamelCase=300):
UpperCAmelCase__ : Optional[int] = NotebookProgressBar(_lowerCamelCase , prefix=_lowerCamelCase , parent=self , width=_lowerCamelCase)
return self.child_bar
def snake_case__ ( self):
UpperCAmelCase__ : int = None
self.display()
class _snake_case ( a__ ):
def __init__( self):
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Tuple = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase):
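        # At train start: pick "Epoch" or "Step" as the first column and create the notebook tracker sized to max_steps.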
UpperCAmelCase__ : Union[str, Any] = """Epoch""" if args.evaluation_strategy == IntervalStrategy.EPOCH else """Step"""
UpperCAmelCase__ : str = 0
UpperCAmelCase__ : List[str] = 0
UpperCAmelCase__ : Optional[Any] = [self.first_column] + ["""Training Loss"""]
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append("""Validation Loss""")
UpperCAmelCase__ : Union[str, Any] = NotebookTrainingTracker(state.max_steps , _lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase):
UpperCAmelCase__ : int = int(state.epoch) if int(state.epoch) == state.epoch else f'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=f'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
UpperCAmelCase__ : str = False
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase):
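        # Lazily create a child progress bar for the prediction loop on the first step, then advance it by one.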
if not has_length(_lowerCamelCase):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
UpperCAmelCase__ : Dict = self.training_tracker.add_child(len(_lowerCamelCase))
else:
UpperCAmelCase__ : Dict = NotebookProgressBar(len(_lowerCamelCase))
self.prediction_bar.update(1)
else:
self.prediction_bar.update(self.prediction_bar.value + 1)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase):
if self.prediction_bar is not None:
self.prediction_bar.close()
UpperCAmelCase__ : Tuple = None
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase):
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
UpperCAmelCase__ : List[str] = {"""Training Loss""": logs["""loss"""]}
            # First column is necessarily Step since we're not in epoch eval strategy
UpperCAmelCase__ : Optional[Any] = state.global_step
self.training_tracker.write_line(_lowerCamelCase)
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=None , **_lowerCamelCase):
if self.training_tracker is not None:
UpperCAmelCase__ : Optional[int] = {"""Training Loss""": """No log""", """Validation Loss""": """No log"""}
for log in reversed(state.log_history):
if "loss" in log:
UpperCAmelCase__ : Optional[int] = log["""loss"""]
break
if self.first_column == "Epoch":
UpperCAmelCase__ : Any = int(state.epoch)
else:
UpperCAmelCase__ : str = state.global_step
UpperCAmelCase__ : str = """eval"""
for k in metrics:
if k.endswith("""_loss"""):
UpperCAmelCase__ : int = re.sub(r"""\_loss$""" , """""" , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = metrics.pop("""total_flos""" , _lowerCamelCase)
UpperCAmelCase__ : Union[str, Any] = metrics.pop("""epoch""" , _lowerCamelCase)
UpperCAmelCase__ : List[Any] = metrics.pop(f'''{metric_key_prefix}_runtime''' , _lowerCamelCase)
UpperCAmelCase__ : Optional[Any] = metrics.pop(f'''{metric_key_prefix}_samples_per_second''' , _lowerCamelCase)
UpperCAmelCase__ : Optional[int] = metrics.pop(f'''{metric_key_prefix}_steps_per_second''' , _lowerCamelCase)
UpperCAmelCase__ : Optional[int] = metrics.pop(f'''{metric_key_prefix}_jit_compilation_time''' , _lowerCamelCase)
for k, v in metrics.items():
if k == f'''{metric_key_prefix}_loss''':
UpperCAmelCase__ : List[str] = v
else:
UpperCAmelCase__ : Tuple = k.split("""_""")
UpperCAmelCase__ : Union[str, Any] = """ """.join([part.capitalize() for part in splits[1:]])
UpperCAmelCase__ : int = v
self.training_tracker.write_line(_lowerCamelCase)
self.training_tracker.remove_child()
UpperCAmelCase__ : List[str] = None
# Evaluation takes a long time so we should force the next update.
UpperCAmelCase__ : Union[str, Any] = True
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase):
self.training_tracker.update(
state.global_step , comment=f'''Epoch {int(state.epoch)}/{state.num_train_epochs}''' , force_update=_lowerCamelCase)
UpperCAmelCase__ : Optional[int] = None
| 407
| 1
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A( unittest.TestCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=3 , SCREAMING_SNAKE_CASE_=18 , SCREAMING_SNAKE_CASE_=30 , SCREAMING_SNAKE_CASE_=4_00 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=True , ):
UpperCamelCase__ = size if size is not None else {"""height""": 18, """width""": 18}
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = num_channels
UpperCamelCase__ = image_size
UpperCamelCase__ = min_resolution
UpperCamelCase__ = max_resolution
UpperCamelCase__ = do_resize
UpperCamelCase__ = size
UpperCamelCase__ = apply_ocr
def UpperCAmelCase_ (self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class __A( __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
def UpperCAmelCase_ (self ):
UpperCamelCase__ = LayoutLMvaImageProcessingTester(self )
@property
def UpperCAmelCase_ (self ):
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""" ) )
self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """apply_ocr""" ) )
def UpperCAmelCase_ (self ):
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
UpperCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
def UpperCAmelCase_ (self ):
pass
def UpperCAmelCase_ (self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
self.assertIsInstance(encoding.words , SCREAMING_SNAKE_CASE_ )
self.assertIsInstance(encoding.boxes , SCREAMING_SNAKE_CASE_ )
# Test batched
UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase_ (self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase_ (self ):
# Initialize image_processing
UpperCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ )
for image in image_inputs:
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor )
# Test not batched input
UpperCamelCase__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
# Test batched
UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size["""height"""],
self.image_processor_tester.size["""width"""],
) , )
def UpperCAmelCase_ (self ):
# with apply_OCR = True
UpperCamelCase__ = LayoutLMvaImageProcessor()
from datasets import load_dataset
UpperCamelCase__ = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
UpperCamelCase__ = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
UpperCamelCase__ = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
UpperCamelCase__ = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(encoding.boxes , SCREAMING_SNAKE_CASE_ )
# with apply_OCR = False
UpperCamelCase__ = LayoutLMvaImageProcessor(apply_ocr=SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = image_processing(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
| 86
|
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def __magic_name__ ( __a : int , __a : List[str] , __a : str=[] ):
'''simple docstring'''
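    # Build a linear-ramp alpha mask for blending overlapping tiles; sides listed in remove_borders have their ramp cropped away so they stay fully opaque.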
UpperCamelCase__ = size[0] - overlap_pixels * 2
UpperCamelCase__ = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
UpperCamelCase__ = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
UpperCamelCase__ = np.pad(__a , mode="""linear_ramp""" , pad_width=__a , end_values=0 )
if "l" in remove_borders:
UpperCamelCase__ = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
UpperCamelCase__ = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
UpperCamelCase__ = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
UpperCamelCase__ = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def __magic_name__ ( __a : int , __a : Dict , __a : Optional[int] ):
'''simple docstring'''
return max(__a , min(__a , __a ) )
def __magic_name__ ( __a : [int] , __a : [int] , __a : [int] ):
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def __magic_name__ ( __a : [int] , __a : int , __a : [int] ):
'''simple docstring'''
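    # Grow the tile rectangle by `overlap` pixels on every side and clamp it to the image bounds.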
UpperCamelCase__ = list(__a )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
UpperCamelCase__ = clamp_rect(__a , [0, 0] , [image_size[0], image_size[1]] )
return rect
def __magic_name__ ( __a : Optional[int] , __a : Tuple , __a : str , __a : List[Any] ):
'''simple docstring'''
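    # Paste a strip of the (resized) original image to the left of the tile so the upscaler sees overlapping context.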
UpperCamelCase__ = Image.new("""RGB""" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(__a , (original_slice, 0) )
return result
def __magic_name__ ( __a : int , __a : int ):
'''simple docstring'''
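    # Undo squeeze_tile after 4x upscaling: crop away the pasted original strip (now scaled by 4) from the left edge.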
UpperCamelCase__ = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
UpperCamelCase__ = tile.crop(__a )
return tile
def __magic_name__ ( __a : List[str] , __a : Any ):
'''simple docstring'''
UpperCamelCase__ = n % d
return n - divisor
class __A( __lowerCamelCase ):
"""simple docstring"""
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 3_50 , ):
super().__init__(
vae=SCREAMING_SNAKE_CASE_ , text_encoder=SCREAMING_SNAKE_CASE_ , tokenizer=SCREAMING_SNAKE_CASE_ , unet=SCREAMING_SNAKE_CASE_ , low_res_scheduler=SCREAMING_SNAKE_CASE_ , scheduler=SCREAMING_SNAKE_CASE_ , max_noise_level=SCREAMING_SNAKE_CASE_ , )
def UpperCAmelCase_ (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
torch.manual_seed(0 )
UpperCamelCase__ = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
UpperCamelCase__ = add_overlap_rect(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , image.size )
UpperCamelCase__ = image.crop(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
UpperCamelCase__ = translated_slice_x - (original_image_slice / 2)
UpperCamelCase__ = max(0 , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = squeeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = to_input.size
UpperCamelCase__ = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
UpperCamelCase__ = super(SCREAMING_SNAKE_CASE_ , self ).__call__(image=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ).images[0]
UpperCamelCase__ = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
UpperCamelCase__ = unsqueeze_tile(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
UpperCamelCase__ = []
if x == 0:
remove_borders.append("""l""" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("""r""" )
if y == 0:
remove_borders.append("""t""" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("""b""" )
UpperCamelCase__ = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=SCREAMING_SNAKE_CASE_ ) , mode="""L""" , )
final_image.paste(
SCREAMING_SNAKE_CASE_ , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , SCREAMING_SNAKE_CASE_ )
@torch.no_grad()
def __call__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 75 , SCREAMING_SNAKE_CASE_ = 9.0 , SCREAMING_SNAKE_CASE_ = 50 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = 1 , SCREAMING_SNAKE_CASE_ = 1_28 , SCREAMING_SNAKE_CASE_ = 32 , SCREAMING_SNAKE_CASE_ = 32 , ):
UpperCamelCase__ = Image.new("""RGB""" , (image.size[0] * 4, image.size[1] * 4) )
UpperCamelCase__ = math.ceil(image.size[0] / tile_size )
UpperCamelCase__ = math.ceil(image.size[1] / tile_size )
UpperCamelCase__ = tcx * tcy
UpperCamelCase__ = 0
for y in range(SCREAMING_SNAKE_CASE_ ):
for x in range(SCREAMING_SNAKE_CASE_ ):
self._process_tile(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , prompt=SCREAMING_SNAKE_CASE_ , num_inference_steps=SCREAMING_SNAKE_CASE_ , guidance_scale=SCREAMING_SNAKE_CASE_ , noise_level=SCREAMING_SNAKE_CASE_ , negative_prompt=SCREAMING_SNAKE_CASE_ , num_images_per_prompt=SCREAMING_SNAKE_CASE_ , eta=SCREAMING_SNAKE_CASE_ , generator=SCREAMING_SNAKE_CASE_ , latents=SCREAMING_SNAKE_CASE_ , )
current_count += 1
if callback is not None:
callback({"""progress""": current_count / total_tile_count, """image""": final_image} )
return final_image
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = """stabilityai/stable-diffusion-x4-upscaler"""
UpperCamelCase__ = StableDiffusionTiledUpscalePipeline.from_pretrained(__a , revision="""fp16""" , torch_dtype=torch.floataa )
UpperCamelCase__ = pipe.to("""cuda""" )
UpperCamelCase__ = Image.open("""../../docs/source/imgs/diffusers_library.jpg""" )
def callback(__a : Optional[int] ):
print(f"progress: {obj['progress']:.4f}" )
obj["image"].save("""diffusers_library_progress.jpg""" )
UpperCamelCase__ = pipe(image=__a , prompt="""Black font, white background, vector""" , noise_level=40 , callback=__a )
final_image.save("""diffusers_library.jpg""" )
if __name__ == "__main__":
main()
| 86
| 1
|
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
_lowerCAmelCase : Dict = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
_lowerCAmelCase : Optional[int] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
_lowerCAmelCase : Union[str, Any] = {"facebook/blenderbot-3B": 1_2_8}
class snake_case ( __lowerCamelCase ):
"""simple docstring"""
_lowerCAmelCase = VOCAB_FILES_NAMES
_lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCAmelCase = ['input_ids', 'attention_mask']
_lowerCAmelCase = BlenderbotTokenizer
def __init__( self , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase=None , lowerCamelCase="replace" , lowerCamelCase="<s>" , lowerCamelCase="</s>" , lowerCamelCase="</s>" , lowerCamelCase="<s>" , lowerCamelCase="<unk>" , lowerCamelCase="<pad>" , lowerCamelCase="<mask>" , lowerCamelCase=False , lowerCamelCase=True , **lowerCamelCase , ) -> Any:
"""simple docstring"""
super().__init__(
lowerCamelCase , lowerCamelCase , tokenizer_file=lowerCamelCase , errors=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , sep_token=lowerCamelCase , cls_token=lowerCamelCase , unk_token=lowerCamelCase , pad_token=lowerCamelCase , mask_token=lowerCamelCase , add_prefix_space=lowerCamelCase , trim_offsets=lowerCamelCase , **lowerCamelCase , )
snake_case__ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , lowerCamelCase ) != add_prefix_space:
snake_case__ : Optional[int] = getattr(lowerCamelCase , pre_tok_state.pop('''type''' ) )
snake_case__ : Optional[int] = add_prefix_space
snake_case__ : List[str] = pre_tok_class(**lowerCamelCase )
snake_case__ : str = add_prefix_space
snake_case__ : Any = '''post_processor'''
snake_case__ : Any = getattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase )
if tokenizer_component_instance:
snake_case__ : str = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
snake_case__ : Optional[int] = tuple(state['''sep'''] )
if "cls" in state:
snake_case__ : List[str] = tuple(state['''cls'''] )
snake_case__ : Tuple = False
if state.get('''add_prefix_space''' , lowerCamelCase ) != add_prefix_space:
snake_case__ : str = add_prefix_space
snake_case__ : Union[str, Any] = True
if state.get('''trim_offsets''' , lowerCamelCase ) != trim_offsets:
snake_case__ : List[Any] = trim_offsets
snake_case__ : Union[str, Any] = True
if changes_to_apply:
snake_case__ : int = getattr(lowerCamelCase , state.pop('''type''' ) )
snake_case__ : Optional[Any] = component_class(**lowerCamelCase )
setattr(self.backend_tokenizer , lowerCamelCase , lowerCamelCase )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def lowercase__ ( self ) -> str:
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error('''Using mask_token, but it is not set yet.''' )
return None
return str(self._mask_token )
@mask_token.setter
def lowercase__ ( self , lowerCamelCase ) -> str:
"""simple docstring"""
snake_case__ : Optional[int] = AddedToken(lowerCamelCase , lstrip=lowerCamelCase , rstrip=lowerCamelCase ) if isinstance(lowerCamelCase , lowerCamelCase ) else value
snake_case__ : Any = value
def lowercase__ ( self , *lowerCamelCase , **lowerCamelCase ) -> BatchEncoding:
"""simple docstring"""
snake_case__ : Any = kwargs.get('''is_split_into_words''' , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , *lowerCamelCase , **lowerCamelCase ) -> BatchEncoding:
"""simple docstring"""
snake_case__ : List[Any] = kwargs.get('''is_split_into_words''' , lowerCamelCase )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*lowerCamelCase , **lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> Tuple[str]:
"""simple docstring"""
snake_case__ : Any = self._tokenizer.model.save(lowerCamelCase , name=lowerCamelCase )
return tuple(lowerCamelCase )
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[int]:
"""simple docstring"""
snake_case__ : List[Any] = [self.sep_token_id]
snake_case__ : Dict = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase__ ( self , lowerCamelCase , lowerCamelCase = None ) -> List[Any]:
"""simple docstring"""
return token_ids_a + [self.eos_token_id]
def lowercase__ ( self , lowerCamelCase ) -> List[int]:
"""simple docstring"""
snake_case__ : Optional[Any] = []
for is_user, text in conversation.iter_texts():
if is_user:
                # We need to prefix the text with a space, as is done within Blenderbot
inputs.append(''' ''' + text )
else:
# Generated responses should contain them already.
inputs.append(lowerCamelCase )
snake_case__ : List[str] = ''' '''.join(lowerCamelCase )
snake_case__ : List[Any] = self.encode(lowerCamelCase )
if len(lowerCamelCase ) > self.model_max_length:
snake_case__ : int = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 261
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def _A ( snake_case__ : int , snake_case__ : int , snake_case__ : Optional[Any]=None , snake_case__ : Any=None ):
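    # Default the attention mask to "not a padding token" when none is provided.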
if attention_mask is None:
snake_case__ : Optional[int] = tf.cast(tf.math.not_equal(snake_case__ , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class snake_case :
"""simple docstring"""
_lowerCAmelCase = OPTConfig
_lowerCAmelCase = {}
_lowerCAmelCase = 'gelu'
def __init__( self , lowerCamelCase , lowerCamelCase=13 , lowerCamelCase=7 , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=99 , lowerCamelCase=16 , lowerCamelCase=2 , lowerCamelCase=4 , lowerCamelCase=4 , lowerCamelCase="gelu" , lowerCamelCase=0.1 , lowerCamelCase=0.1 , lowerCamelCase=20 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=0 , lowerCamelCase=16 , lowerCamelCase=16 , ) -> List[str]:
"""simple docstring"""
snake_case__ : List[str] = parent
snake_case__ : List[str] = batch_size
snake_case__ : str = seq_length
snake_case__ : Union[str, Any] = is_training
snake_case__ : Union[str, Any] = use_labels
snake_case__ : Optional[int] = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : List[Any] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : Union[str, Any] = eos_token_id
snake_case__ : Optional[int] = pad_token_id
snake_case__ : Dict = bos_token_id
snake_case__ : List[Any] = embed_dim
snake_case__ : Tuple = word_embed_proj_dim
snake_case__ : Any = False
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
snake_case__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
snake_case__ : Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
snake_case__ : int = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCamelCase , **self.config_updates , )
snake_case__ : Dict = prepare_opt_inputs_dict(lowerCamelCase , lowerCamelCase )
return config, inputs_dict
def lowercase__ ( self , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Dict = TFOPTModel(config=lowerCamelCase )
snake_case__ : str = inputs_dict['''input_ids''']
snake_case__ : List[str] = input_ids[:1, :]
snake_case__ : Tuple = inputs_dict['''attention_mask'''][:1, :]
snake_case__ : Optional[Any] = 1
# first forward pass
snake_case__ : List[Any] = model(lowerCamelCase , attention_mask=lowerCamelCase , use_cache=lowerCamelCase )
snake_case__ ,snake_case__ : Union[str, Any] = outputs.to_tuple()
        # create hypothetical next token and extend to next_input_ids
snake_case__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
snake_case__ : Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
snake_case__ : str = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
snake_case__ : str = model(lowerCamelCase , attention_mask=lowerCamelCase )[0]
snake_case__ : int = model(lowerCamelCase , attention_mask=lowerCamelCase , past_key_values=lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
snake_case__ : Optional[int] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
snake_case__ : Any = output_from_no_past[:, -3:, random_slice_idx]
snake_case__ : int = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCamelCase , lowerCamelCase , rtol=1E-3 )
@require_tf
class snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_lowerCAmelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
_lowerCAmelCase = (
{'feature-extraction': TFOPTModel, 'text-generation': TFOPTForCausalLM} if is_tf_available() else {}
)
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = False
_lowerCAmelCase = 1_0
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Any = TFOPTModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=lowerCamelCase )
def lowercase__ ( self ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase__ ( self ) -> int:
"""simple docstring"""
snake_case__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase )
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ ,snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(lowerCamelCase , lowerCamelCase ):
if hasattr(lowerCamelCase , '''weight''' ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they don't exist yet,
                # and then retry to get the attribute once built.
model.build()
if hasattr(lowerCamelCase , '''weight''' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
snake_case__ : Tuple = model_class(config=lowerCamelCase )
snake_case__ : Tuple = _get_word_embedding_weight(lowerCamelCase , model.get_input_embeddings() )
snake_case__ : List[Any] = _get_word_embedding_weight(lowerCamelCase , model.get_output_embeddings() )
                # resize the token embeddings
model.resize_token_embeddings(lowerCamelCase )
snake_case__ : int = _get_word_embedding_weight(lowerCamelCase , model.get_input_embeddings() )
snake_case__ : Optional[Any] = _get_word_embedding_weight(lowerCamelCase , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
snake_case__ : Any = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCamelCase )
# check that weights remain the same after resizing
snake_case__ : int = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
snake_case__ : Optional[int] = False
self.assertTrue(lowerCamelCase )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCamelCase )
snake_case__ : Union[str, Any] = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
snake_case__ : Optional[Any] = False
self.assertTrue(lowerCamelCase )
def _A ( snake_case__ : Optional[Any] ):
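    # Wrap a nested Python list of token ids in an integer tf.constant.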
return tf.constant(snake_case__ , dtype=tf.intaa )
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
_lowerCAmelCase = 9_9
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Any = tf.ones((4, 1) , dtype=tf.intaa ) * 2
snake_case__ : int = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
snake_case__ : Dict = input_ids.shape[0]
snake_case__ : Optional[Any] = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : Optional[Any] = TFOPTModel.from_pretrained('''facebook/opt-350m''' )
snake_case__ : List[str] = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
snake_case__ : Optional[Any] = tf.not_equal(lowerCamelCase , model.config.pad_token_id )
with tf.GradientTape():
snake_case__ : List[str] = model(input_ids=lowerCamelCase , attention_mask=lowerCamelCase ).last_hidden_state
snake_case__ : Dict = (1, 11, 512)
self.assertEqual(output.shape , lowerCamelCase )
snake_case__ : int = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=4E-3 ) )
snake_case__ : Optional[int] = tf.function(lowerCamelCase , jit_compile=lowerCamelCase )
snake_case__ : Dict = xla_generate(lowerCamelCase , lowerCamelCase )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase , atol=4E-2 ) )
@require_tf
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
def lowercase__ ( self ) -> int:
"""simple docstring"""
super().setUp()
snake_case__ : str = '''facebook/opt-350m'''
def lowercase__ ( self ) -> str:
"""simple docstring"""
snake_case__ : List[Any] = TFOPTForCausalLM.from_pretrained(self.path_model )
snake_case__ : List[str] = GPTaTokenizer.from_pretrained(self.path_model )
snake_case__ : Dict = [
'''Today is a beautiful day and I want to''',
'''In the city of''',
'''Paris is the capital of France and''',
'''Computers and mobile phones have taken''',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
snake_case__ : Union[str, Any] = tokenizer(lowerCamelCase , return_tensors='''tf''' , padding=lowerCamelCase , add_special_tokens=lowerCamelCase )
snake_case__ : List[Any] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
snake_case__ : Tuple = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-4 ) )
snake_case__ : Optional[Any] = tf.function(lowerCamelCase , jit_compile=lowerCamelCase )
snake_case__ : int = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCamelCase , lowerCamelCase , atol=1E-4 ) )
@require_tf
@slow
class snake_case ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase__ ( self ) -> int:
"""simple docstring"""
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Union[str, Any] = '''facebook/opt-125m'''
snake_case__ : Dict = [
'''Today is a beautiful day and I want to''',
'''In the city of New York, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
snake_case__ : Dict = []
snake_case__ : Dict = GPTaTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : Union[str, Any] = TFOPTForCausalLM.from_pretrained(lowerCamelCase )
for prompt in self.prompts:
snake_case__ : Tuple = tokenizer(lowerCamelCase , return_tensors='''tf''' ).input_ids
snake_case__ : Optional[int] = model.generate(lowerCamelCase , max_length=10 )
snake_case__ : Optional[int] = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowercase__ ( self ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Dict = '''facebook/opt-350m'''
snake_case__ : Optional[Any] = GPTaTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = TFOPTForCausalLM.from_pretrained(lowerCamelCase )
snake_case__ : str = '''left'''
# use different length sentences to test batching
snake_case__ : List[str] = [
'''Hello, my dog is a little''',
'''Today, I''',
]
snake_case__ : List[str] = tokenizer(lowerCamelCase , return_tensors='''tf''' , padding=lowerCamelCase )
snake_case__ : Tuple = inputs['''input_ids''']
snake_case__ : Any = model.generate(input_ids=lowerCamelCase , attention_mask=inputs['''attention_mask'''] )
snake_case__ : Tuple = tokenizer(sentences[0] , return_tensors='''tf''' ).input_ids
snake_case__ : Union[str, Any] = model.generate(input_ids=lowerCamelCase )
snake_case__ : str = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['''attention_mask'''][-1] , tf.intaa ) )
snake_case__ : Optional[int] = tokenizer(sentences[1] , return_tensors='''tf''' ).input_ids
snake_case__ : int = model.generate(input_ids=lowerCamelCase , max_length=model.config.max_length - num_paddings )
snake_case__ : int = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
snake_case__ : Optional[int] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCamelCase )
snake_case__ : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCamelCase )
snake_case__ : Any = [
'''Hello, my dog is a little bit of a dork.\nI\'m a little bit''',
'''Today, I was in the middle of a conversation with a friend about the''',
]
self.assertListEqual(lowerCamelCase , lowerCamelCase )
self.assertListEqual(lowerCamelCase , [non_padded_sentence, padded_sentence] )
def lowercase__ ( self ) -> Dict:
"""simple docstring"""
snake_case__ : str = '''facebook/opt-350m'''
snake_case__ : int = [
'''Today is a beautiful day and I want to''',
'''In the city of San Francisco, the city''',
'''Paris is the capital of France and the capital''',
'''Computers and mobile phones have taken over the''',
]
snake_case__ : Optional[Any] = []
snake_case__ : Dict = GPTaTokenizer.from_pretrained(lowerCamelCase )
snake_case__ : Optional[int] = TFOPTForCausalLM.from_pretrained(lowerCamelCase )
for prompt in self.prompts:
snake_case__ : List[str] = tokenizer(lowerCamelCase , return_tensors='''tf''' ).input_ids
snake_case__ : int = model.generate(lowerCamelCase , max_length=10 )
snake_case__ : Tuple = tokenizer.batch_decode(lowerCamelCase , skip_special_tokens=lowerCamelCase )
predicted_outputs += generated_string
self.assertListEqual(lowerCamelCase , lowerCamelCase )
| 261
| 1
|
'''simple docstring'''
from math import sqrt
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int ) -> int:
"""simple docstring"""
__a = 0
for i in range(1, int(sqrt(SCREAMING_SNAKE_CASE__ ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE__ ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE__ ):
total += i
return total - n
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int = 10000 ) -> int:
"""simple docstring"""
__a = sum(
i
for i in range(1, SCREAMING_SNAKE_CASE__ )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE__ ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 270
|
'''simple docstring'''
from maths.prime_factors import prime_factors
def __UpperCAmelCase ( SCREAMING_SNAKE_CASE__: int ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
__a = f"""Input value of [number={number}] must be an integer"""
raise TypeError(SCREAMING_SNAKE_CASE__ )
if number < 1:
raise ValueError('Input must be a positive integer' )
return -1 if len(prime_factors(SCREAMING_SNAKE_CASE__ ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 270
| 1
|
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
with open(__UpperCAmelCase ) as metadata_file:
lowerCamelCase_ : List[str] = json.load(__UpperCAmelCase )
lowerCamelCase_ : Optional[int] = LukeConfig(use_entity_aware_attention=__UpperCAmelCase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
lowerCamelCase_ : Optional[Any] = torch.load(__UpperCAmelCase , map_location="cpu" )["module"]
# Load the entity vocab file
lowerCamelCase_ : Optional[Any] = load_original_entity_vocab(__UpperCAmelCase )
# add an entry for [MASK2]
lowerCamelCase_ : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCamelCase_ : Any = AddedToken("<ent>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = AddedToken("<ent2>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , "tokenizer_config.json" ) , "r" ) as f:
lowerCamelCase_ : Any = json.load(__UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = "MLukeTokenizer"
with open(os.path.join(__UpperCAmelCase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Tuple = MLukeTokenizer.from_pretrained(__UpperCAmelCase )
# Initialize the embeddings of the special tokens
lowerCamelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(["@"] )[0]
lowerCamelCase_ : Dict = tokenizer.convert_tokens_to_ids(["#"] )[0]
lowerCamelCase_ : Optional[int] = state_dict["embeddings.word_embeddings.weight"]
lowerCamelCase_ : List[Any] = word_emb[ent_init_index].unsqueeze(0 )
lowerCamelCase_ : str = word_emb[enta_init_index].unsqueeze(0 )
lowerCamelCase_ : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowerCamelCase_ : List[Any] = state_dict[bias_name]
lowerCamelCase_ : Any = decoder_bias[ent_init_index].unsqueeze(0 )
lowerCamelCase_ : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
lowerCamelCase_ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCamelCase_ : str = f"encoder.layer.{layer_index}.attention.self."
lowerCamelCase_ : Optional[Any] = state_dict[prefix + matrix_name]
lowerCamelCase_ : List[str] = state_dict[prefix + matrix_name]
lowerCamelCase_ : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCamelCase_ : Optional[int] = state_dict["entity_embeddings.entity_embeddings.weight"]
lowerCamelCase_ : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
lowerCamelCase_ : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowerCamelCase_ : Optional[int] = state_dict["entity_predictions.bias"]
lowerCamelCase_ : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
lowerCamelCase_ : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowerCamelCase_ : str = LukeForMaskedLM(config=__UpperCAmelCase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
lowerCamelCase_ : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
lowerCamelCase_ : str = state_dict[key]
else:
lowerCamelCase_ : Optional[int] = state_dict[key]
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
if set(__UpperCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__UpperCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowerCamelCase_ : str = MLukeTokenizer.from_pretrained(__UpperCAmelCase , task="entity_classification" )
lowerCamelCase_ : Union[str, Any] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
lowerCamelCase_ : Any = (0, 9)
lowerCamelCase_ : List[Any] = tokenizer(__UpperCAmelCase , entity_spans=[span] , return_tensors="pt" )
lowerCamelCase_ : Any = model(**__UpperCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCamelCase_ : Any = torch.Size((1, 33, 768) )
lowerCamelCase_ : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCamelCase_ : Union[str, Any] = torch.Size((1, 1, 768) )
lowerCamelCase_ : str = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowerCamelCase_ : Any = MLukeTokenizer.from_pretrained(__UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = "Tokyo is the capital of <mask>."
lowerCamelCase_ : Optional[int] = (24, 30)
lowerCamelCase_ : Dict = tokenizer(__UpperCAmelCase , entity_spans=[span] , return_tensors="pt" )
lowerCamelCase_ : List[Any] = model(**__UpperCAmelCase )
lowerCamelCase_ : Dict = encoding["input_ids"][0].tolist()
lowerCamelCase_ : Tuple = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
lowerCamelCase_ : str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
lowerCamelCase_ : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCAmelCase ) )
model.save_pretrained(__UpperCAmelCase )
def __a ( __UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : int = ["[MASK]", "[PAD]", "[UNK]"]
lowerCamelCase_ : int = [json.loads(__UpperCAmelCase ) for line in open(__UpperCAmelCase )]
lowerCamelCase_ : Optional[Any] = {}
for entry in data:
lowerCamelCase_ : str = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowerCamelCase_ : Union[str, Any] = entity_id
break
lowerCamelCase_ : Any = f"{language}:{entity_name}"
lowerCamelCase_ : int = entity_id
return new_mapping
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
snake_case_ : Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
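# Example invocation (a sketch only; the script name and paths are placeholders for files produced
# by the original mLUKE release and are not shipped with this converter):
#   python convert_mluke_checkpoint.py \
#       --checkpoint_path /path/to/pytorch_model.bin \
#       --metadata_path /path/to/metadata.json \
#       --entity_vocab_path /path/to/entity_vocab_file \
#       --pytorch_dump_folder_path ./converted_mluke_base \
#       --model_size base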
| 488
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
snake_case_ : Optional[Any] = None
snake_case_ : Tuple = logging.get_logger(__name__)
snake_case_ : str = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
snake_case_ : int = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
snake_case_ : Optional[Any] = {
"facebook/mbart-large-en-ro": 1024,
"facebook/mbart-large-cc25": 1024,
}
# fmt: off
snake_case_ : Any = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class snake_case_ ( __A ):
'''simple docstring'''
lowerCamelCase = VOCAB_FILES_NAMES
lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase = ["input_ids", "attention_mask"]
lowerCamelCase = MBartTokenizer
lowerCamelCase = []
lowerCamelCase = []
def __init__( self : Optional[Any] , __magic_name__ : Tuple=None , __magic_name__ : int=None , __magic_name__ : Dict="<s>" , __magic_name__ : int="</s>" , __magic_name__ : List[str]="</s>" , __magic_name__ : List[Any]="<s>" , __magic_name__ : Optional[int]="<unk>" , __magic_name__ : Any="<pad>" , __magic_name__ : Any="<mask>" , __magic_name__ : Optional[Any]=None , __magic_name__ : List[Any]=None , __magic_name__ : str=None , **__magic_name__ : List[str] , ) -> str:
# Mask token behave like a normal word, i.e. include the space before it
lowerCamelCase_ : Dict = AddedToken(__magic_name__ , lstrip=__magic_name__ , rstrip=__magic_name__ ) if isinstance(__magic_name__ , __magic_name__ ) else mask_token
super().__init__(
vocab_file=__magic_name__ , tokenizer_file=__magic_name__ , bos_token=__magic_name__ , eos_token=__magic_name__ , sep_token=__magic_name__ , cls_token=__magic_name__ , unk_token=__magic_name__ , pad_token=__magic_name__ , mask_token=__magic_name__ , src_lang=__magic_name__ , tgt_lang=__magic_name__ , additional_special_tokens=__magic_name__ , **__magic_name__ , )
lowerCamelCase_ : List[str] = vocab_file
lowerCamelCase_ : Optional[Any] = False if not self.vocab_file else True
lowerCamelCase_ : List[str] = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"additional_special_tokens": _additional_special_tokens} )
lowerCamelCase_ : str = {
lang_code: self.convert_tokens_to_ids(__magic_name__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCamelCase_ : Optional[int] = src_lang if src_lang is not None else "en_XX"
lowerCamelCase_ : Optional[Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCamelCase_ : List[str] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __SCREAMING_SNAKE_CASE ( self : str ) -> str:
return self._src_lang
@src_lang.setter
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : str ) -> None:
lowerCamelCase_ : int = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[int] , __magic_name__ : Optional[List[int]] = None ) -> List[int]:
lowerCamelCase_ : Any = [self.sep_token_id]
lowerCamelCase_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : str , __magic_name__ : Optional[str] , __magic_name__ : Optional[str] , **__magic_name__ : Any ) -> List[str]:
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase_ : Optional[Any] = src_lang
lowerCamelCase_ : Tuple = self(__magic_name__ , add_special_tokens=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ )
lowerCamelCase_ : Union[str, Any] = self.convert_tokens_to_ids(__magic_name__ )
lowerCamelCase_ : List[Any] = tgt_lang_id
return inputs
def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[str] , __magic_name__ : str = "en_XX" , __magic_name__ : Optional[List[str]] = None , __magic_name__ : str = "ro_RO" , **__magic_name__ : Optional[int] , ) -> BatchEncoding:
lowerCamelCase_ : Optional[int] = src_lang
lowerCamelCase_ : Any = tgt_lang
return super().prepare_seqaseq_batch(__magic_name__ , __magic_name__ , **__magic_name__ )
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Any:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Tuple ) -> None:
lowerCamelCase_ : Union[str, Any] = self.convert_tokens_to_ids(__magic_name__ )
lowerCamelCase_ : Any = []
lowerCamelCase_ : str = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ : List[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ : int = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ : str = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str ) -> None:
lowerCamelCase_ : Dict = self.convert_tokens_to_ids(__magic_name__ )
lowerCamelCase_ : Any = []
lowerCamelCase_ : List[Any] = [self.eos_token_id, self.cur_lang_code]
lowerCamelCase_ : List[str] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCamelCase_ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCamelCase_ : int = processors.TemplateProcessing(
single=prefix_tokens_str + ["$A"] + suffix_tokens_str , pair=prefix_tokens_str + ["$A", "$B"] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : str , __magic_name__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
"tokenizer." )
if not os.path.isdir(__magic_name__ ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory." )
return
lowerCamelCase_ : Optional[Any] = os.path.join(
__magic_name__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__magic_name__ ):
copyfile(self.vocab_file , __magic_name__ )
return (out_vocab_file,)
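# Minimal usage sketch for the fast MBart tokenizer defined above (written against the public
# transformers class name, since the class name in this dump is mangled; the checkpoint id is only
# illustrative and must be downloadable for this to run):
# from transformers import MBartTokenizerFast
# tok = MBartTokenizerFast.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO")
# batch = tok("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# # set_src_lang_special_tokens() arranges the source sequence as "<tokens> </s> en_XX".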
| 488
| 1
|
"""simple docstring"""
def gnome_sort ( lst : list ) -> list:
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            # swap the out-of-order neighbours in place, then step back
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(gnome_sort(unsorted))
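# Worked example (illustrative): gnome_sort([3, 1, 2]) proceeds as
# [3, 1, 2] -> swap -> [1, 3, 2] -> advance -> swap -> [1, 2, 3],
# so gnome_sort([3, 1, 2]) == [1, 2, 3]; the worst-case running time is O(n^2).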
| 500
|
"""simple docstring"""
from typing import Dict, Optional
import numpy as np
import datasets
SCREAMING_SNAKE_CASE_ : Tuple = '\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n'
SCREAMING_SNAKE_CASE_ : Optional[int] = '\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric("mean_iou")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mean_iou\': 0.47750000000000004, \'mean_accuracy\': 0.5916666666666666, \'overall_accuracy\': 0.5263157894736842, \'per_category_iou\': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), \'per_category_accuracy\': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. ])}\n'
SCREAMING_SNAKE_CASE_ : int = '\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}'
def _snake_case ( UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
if label_map is not None:
for old_id, new_id in label_map.items():
A__ = new_id
# turn into Numpy arrays
A__ = np.array(UpperCAmelCase_ )
A__ = np.array(UpperCAmelCase_ )
if reduce_labels:
A__ = 255
A__ = label - 1
A__ = 255
A__ = label != ignore_index
A__ = np.not_equal(UpperCAmelCase_ , UpperCAmelCase_ )
A__ = pred_label[mask]
A__ = np.array(UpperCAmelCase_ )[mask]
A__ = pred_label[pred_label == label]
A__ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0]
A__ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0]
A__ = np.histogram(UpperCAmelCase_ , bins=UpperCAmelCase_ , range=(0, num_labels - 1) )[0]
A__ = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
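# Worked example for the quantities computed above (numbers are illustrative): with a single class,
# a predicted region of 4 pixels and a ground-truth region of 5 pixels that overlap on 3 pixels give
# area_intersect = 3 and area_union = 4 + 5 - 3 = 6, so the class IoU is 3 / 6 = 0.5 and the class
# accuracy is area_intersect / area_label = 3 / 5 = 0.6.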
def _snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
A__ = np.zeros((num_labels,) , dtype=np.floataa )
A__ = np.zeros((num_labels,) , dtype=np.floataa )
A__ = np.zeros((num_labels,) , dtype=np.floataa )
A__ = np.zeros((num_labels,) , dtype=np.floataa )
for result, gt_seg_map in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
A__ , A__ , A__ , A__ = intersect_and_union(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
total_area_intersect += area_intersect
total_area_union += area_union
total_area_pred_label += area_pred_label
total_area_label += area_label
return total_area_intersect, total_area_union, total_area_pred_label, total_area_label
def _snake_case ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : bool , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Dict[int, int]] = None , UpperCAmelCase_ : bool = False , ):
A__ , A__ , A__ , A__ = total_intersect_and_union(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# compute metrics
A__ = {}
A__ = total_area_intersect.sum() / total_area_label.sum()
A__ = total_area_intersect / total_area_union
A__ = total_area_intersect / total_area_label
A__ = np.nanmean(UpperCAmelCase_ )
A__ = np.nanmean(UpperCAmelCase_ )
A__ = all_acc
A__ = iou
A__ = acc
if nan_to_num is not None:
A__ = {metric: np.nan_to_num(UpperCAmelCase_ , nan=UpperCAmelCase_ ) for metric, metric_value in metrics.items()}
return metrics
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
"""simple docstring"""
def UpperCamelCase ( self: str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
# 1st Seq - height dim, 2nd - width dim
{
"""predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
"""references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16""" ) ) ),
} ) , reference_urls=[
"""https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py"""
] , )
def UpperCamelCase ( self: str , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] , UpperCamelCase: int , UpperCamelCase: bool , UpperCamelCase: Optional[int] = None , UpperCamelCase: Optional[Dict[int, int]] = None , UpperCamelCase: bool = False , ):
"""simple docstring"""
A__ = mean_iou(
results=UpperCamelCase , gt_seg_maps=UpperCamelCase , num_labels=UpperCamelCase , ignore_index=UpperCamelCase , nan_to_num=UpperCamelCase , label_map=UpperCamelCase , reduce_labels=UpperCamelCase , )
return iou_result
| 500
| 1
|
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
_a : str = logging.get_logger(__name__)
def a_ ( __magic_name__ , __magic_name__ ) -> int:
"""simple docstring"""
snake_case : Any = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def a_ ( __magic_name__ , __magic_name__ ) -> Any:
"""simple docstring"""
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
snake_case : Dict = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
snake_case : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
snake_case : Any = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
snake_case : Optional[Any] = in_proj_weight[
-encoder_config.hidden_size :, :
]
def a_ ( __magic_name__ , __magic_name__ , __magic_name__ ) -> Any:
"""simple docstring"""
snake_case : Union[str, Any] = dct.pop(__magic_name__ )
snake_case : List[Any] = val
def a_ ( __magic_name__ ) -> List[str]:
"""simple docstring"""
if "handwritten" in checkpoint_url:
snake_case : Any = '''https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg''' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
snake_case : List[Any] = '''https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'''
snake_case : List[Any] = Image.open(requests.get(__magic_name__ , stream=__magic_name__ ).raw ).convert('''RGB''' )
return im
@torch.no_grad()
def a_ ( __magic_name__ , __magic_name__ ) -> Tuple:
"""simple docstring"""
snake_case : Any = ViTConfig(image_size=384 , qkv_bias=__magic_name__ )
snake_case : Optional[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
snake_case : Optional[int] = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
snake_case : Union[str, Any] = 1_024
snake_case : Union[str, Any] = 4_096
snake_case : Any = 24
snake_case : List[Any] = 16
snake_case : Union[str, Any] = 1_024
else:
raise ValueError('''Should either find \'base\' or \'large\' in checkpoint URL''' )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
snake_case : List[str] = False
snake_case : int = '''relu'''
snake_case : int = 1_024
snake_case : Tuple = True
snake_case : Dict = False
snake_case : Dict = False
# load HuggingFace model
snake_case : Union[str, Any] = ViTModel(__magic_name__ , add_pooling_layer=__magic_name__ )
snake_case : Dict = TrOCRForCausalLM(__magic_name__ )
snake_case : Any = VisionEncoderDecoderModel(encoder=__magic_name__ , decoder=__magic_name__ )
model.eval()
# load state_dict of original model, rename some keys
snake_case : List[str] = torch.hub.load_state_dict_from_url(__magic_name__ , map_location='''cpu''' , check_hash=__magic_name__ )['''model''']
snake_case : int = create_rename_keys(__magic_name__ , __magic_name__ )
for src, dest in rename_keys:
rename_key(__magic_name__ , __magic_name__ , __magic_name__ )
read_in_q_k_v(__magic_name__ , __magic_name__ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
snake_case : Union[str, Any] = state_dict.pop(__magic_name__ )
if key.startswith('''decoder''' ) and "output_projection" not in key:
snake_case : List[str] = val
else:
snake_case : Dict = val
# load state dict
model.load_state_dict(__magic_name__ )
# Check outputs on an image
snake_case : int = ViTImageProcessor(size=encoder_config.image_size )
snake_case : Union[str, Any] = RobertaTokenizer.from_pretrained('''roberta-large''' )
snake_case : Tuple = TrOCRProcessor(__magic_name__ , __magic_name__ )
snake_case : Dict = processor(images=prepare_img(__magic_name__ ) , return_tensors='''pt''' ).pixel_values
# verify logits
snake_case : Optional[int] = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
snake_case : Any = model(pixel_values=__magic_name__ , decoder_input_ids=__magic_name__ )
snake_case : List[Any] = outputs.logits
snake_case : Union[str, Any] = torch.Size([1, 1, 50_265] )
if "trocr-base-handwritten" in checkpoint_url:
snake_case : Optional[int] = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
snake_case : Any = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
snake_case : Any = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
snake_case : Union[str, Any] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , __magic_name__ , atol=1e-3 ), "First elements of logits not as expected"
Path(__magic_name__ ).mkdir(exist_ok=__magic_name__ )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(__magic_name__ )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
_a : Tuple = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_url',
default='https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt',
type=str,
help='URL to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
_a : Optional[Any] = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
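# Example invocation (a sketch; the script name and output directory are placeholders, the URL is
# the default checkpoint given above):
#   python convert_trocr_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#       --pytorch_dump_folder_path ./trocr-base-handwritten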
| 598
|
from __future__ import annotations
import bisect
def bisect_left ( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right ( sorted_collection , item , lo = 0 , hi = -1 ) -> int:
    """simple docstring"""
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def insort_left ( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    """simple docstring"""
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right ( sorted_collection , item , lo = 0 , hi = -1 ) -> None:
    """simple docstring"""
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search ( sorted_collection , item ) -> int | None:
    """simple docstring"""
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib ( sorted_collection , item ) -> int | None:
    """simple docstring"""
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion ( sorted_collection , item , left , right ) -> int | None:
    """simple docstring"""
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input('Enter numbers separated by comma:\n').strip()
    collection = sorted(int(item) for item in user_input.split(','))
    target = int(input('Enter a single number to be found in the list:\n'))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 598
| 1
|
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class __A :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=12 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0 , UpperCamelCase_=None , ):
__UpperCAmelCase : Union[str, Any] = parent
__UpperCAmelCase : Union[str, Any] = batch_size
__UpperCAmelCase : Optional[Any] = seq_length
__UpperCAmelCase : Dict = is_training
__UpperCAmelCase : List[Any] = use_input_mask
__UpperCAmelCase : Union[str, Any] = use_labels
__UpperCAmelCase : Optional[int] = vocab_size
__UpperCAmelCase : List[Any] = hidden_size
__UpperCAmelCase : Optional[Any] = projection_dim
__UpperCAmelCase : Dict = num_hidden_layers
__UpperCAmelCase : List[str] = num_attention_heads
__UpperCAmelCase : int = intermediate_size
__UpperCAmelCase : List[str] = dropout
__UpperCAmelCase : Union[str, Any] = attention_dropout
__UpperCAmelCase : Any = max_position_embeddings
__UpperCAmelCase : Union[str, Any] = initializer_range
__UpperCAmelCase : Optional[Any] = scope
__UpperCAmelCase : str = bos_token_id
def _snake_case ( self ):
__UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase : List[str] = None
if self.use_input_mask:
__UpperCAmelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
__UpperCAmelCase : List[Any] = input_mask.numpy()
__UpperCAmelCase , __UpperCAmelCase : List[Any] = input_mask.shape
__UpperCAmelCase : Tuple = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCamelCase_ ):
__UpperCAmelCase : Any = 1
__UpperCAmelCase : Any = 0
__UpperCAmelCase : List[str] = self.get_config()
return config, input_ids, tf.convert_to_tensor(UpperCamelCase_ )
def _snake_case ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _snake_case ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
__UpperCAmelCase : str = TFBlipTextModel(config=UpperCamelCase_ )
__UpperCAmelCase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , training=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = model(UpperCamelCase_ , training=UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self ):
__UpperCAmelCase : List[Any] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = config_and_inputs
__UpperCAmelCase : Tuple = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class __A (__magic_name__ , unittest.TestCase ):
snake_case :Optional[Any] = (TFBlipTextModel,) if is_tf_available() else ()
snake_case :Any = False
snake_case :Dict = False
snake_case :Dict = False
def _snake_case ( self ):
__UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self )
__UpperCAmelCase : Tuple = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def _snake_case ( self ):
self.config_tester.run_common_tests()
def _snake_case ( self ):
__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _snake_case ( self ):
pass
def _snake_case ( self ):
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def _snake_case ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _snake_case ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def _snake_case ( self ):
pass
@slow
def _snake_case ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Tuple = TFBlipTextModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase_ )
| 10
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class __A (TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
def __init__( self , UpperCamelCase_=None , **UpperCamelCase_ ):
super().__init__(features=UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and column:
if all(
isinstance(UpperCamelCase_ , torch.Tensor ) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column ):
return torch.stack(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
import torch
if isinstance(UpperCamelCase_ , (str, bytes, type(UpperCamelCase_ )) ):
return value
elif isinstance(UpperCamelCase_ , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
__UpperCAmelCase : int = {}
if isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
__UpperCAmelCase : Optional[int] = {"dtype": torch.intaa}
elif isinstance(UpperCamelCase_ , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
__UpperCAmelCase : str = {"dtype": torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCamelCase_ , PIL.Image.Image ):
__UpperCAmelCase : str = np.asarray(UpperCamelCase_ )
return torch.tensor(UpperCamelCase_ , **{**default_dtype, **self.torch_tensor_kwargs} )
def _snake_case ( self , UpperCamelCase_ ):
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCamelCase_ , "__array__" ) and not isinstance(UpperCamelCase_ , torch.Tensor ):
__UpperCAmelCase : Dict = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCamelCase_ , np.ndarray ):
if data_struct.dtype == object: # torch tensors cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
elif isinstance(UpperCamelCase_ , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCamelCase_ ) for substruct in data_struct] )
return self._tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
return map_nested(self._recursive_tensorize , UpperCamelCase_ , map_list=UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : List[str] = self.numpy_arrow_extractor().extract_row(UpperCamelCase_ )
__UpperCAmelCase : Union[str, Any] = self.python_features_decoder.decode_row(UpperCamelCase_ )
return self.recursive_tensorize(UpperCamelCase_ )
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : Union[str, Any] = self.numpy_arrow_extractor().extract_column(UpperCamelCase_ )
__UpperCAmelCase : Optional[Any] = self.python_features_decoder.decode_column(UpperCamelCase_ , pa_table.column_names[0] )
__UpperCAmelCase : List[Any] = self.recursive_tensorize(UpperCamelCase_ )
__UpperCAmelCase : List[str] = self._consolidate(UpperCamelCase_ )
return column
def _snake_case ( self , UpperCamelCase_ ):
__UpperCAmelCase : int = self.numpy_arrow_extractor().extract_batch(UpperCamelCase_ )
__UpperCAmelCase : Any = self.python_features_decoder.decode_batch(UpperCamelCase_ )
__UpperCAmelCase : Optional[int] = self.recursive_tensorize(UpperCamelCase_ )
for column_name in batch:
__UpperCAmelCase : Tuple = self._consolidate(batch[column_name] )
return batch
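# Sketch of how this formatter is normally reached through the public datasets API rather than by
# instantiating it directly (the column name and dtype below are illustrative assumptions):
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
# ds = ds.with_format("torch", dtype=torch.float32)   # extra kwargs become torch_tensor_kwargs
# ds[0]["x"]   # -> tensor([1., 2.]) with dtype torch.float32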
| 10
| 1
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=a_ )
class A ( a_ ):
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
lowercase_ = field(default='question-answering-extractive' ,metadata={'include_in_asdict_even_if_is_default': True} )
lowercase_ = Features({'question': Value('string' ), 'context': Value('string' )} )
lowercase_ = Features(
{
'answers': Sequence(
{
'text': Value('string' ),
'answer_start': Value('int32' ),
} )
} )
lowercase_ = 'question'
lowercase_ = 'context'
lowercase_ = 'answers'
@property
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 22
|
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class snake_case_ ( a_ ,unittest.TestCase ):
__lowerCAmelCase = FunnelTokenizer
__lowerCAmelCase = FunnelTokenizerFast
__lowerCAmelCase = True
__lowerCAmelCase = True
def snake_case_ ( self ):
super().setUp()
a_ : int = [
"<unk>",
"<cls>",
"<sep>",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def snake_case_ ( self , **a_ ):
return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ )
def snake_case_ ( self , **a_ ):
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ )
def snake_case_ ( self , a_ ):
a_ : int = "UNwant\u00E9d,running"
a_ : List[Any] = "unwanted, running"
return input_text, output_text
def snake_case_ ( self ):
a_ : int = self.tokenizer_class(self.vocab_file )
a_ : List[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 1_0, 8, 9] )
def snake_case_ ( self ):
a_ : List[str] = self.get_tokenizers(do_lower_case=a_ )
for tokenizer in tokenizers:
a_ : Dict = tokenizer("UNwant\u00E9d,running" )
a_ : int = len(inputs["input_ids"] ) - 1
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len )
a_ : Dict = tokenizer("UNwant\u00E9d,running" , "UNwant\u00E9d,running" )
self.assertListEqual(inputs["token_type_ids"] , [2] + [0] * sentence_len + [1] * sentence_len )
| 237
| 0
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class __SCREAMING_SNAKE_CASE :
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = LEDConfig
SCREAMING_SNAKE_CASE__ :str = {}
SCREAMING_SNAKE_CASE__ :List[str] = "gelu"
def __init__( self : List[Any] , __a : Union[str, Any] , __a : List[Any]=13 , __a : int=7 , __a : str=True , __a : Any=False , __a : str=99 , __a : str=32 , __a : Union[str, Any]=2 , __a : Optional[Any]=4 , __a : List[Any]=37 , __a : List[Any]=0.1 , __a : Tuple=0.1 , __a : Dict=20 , __a : str=2 , __a : Dict=1 , __a : Any=0 , __a : List[Any]=4 , ) -> List[Any]:
_UpperCamelCase : Optional[Any] = parent
_UpperCamelCase : List[str] = batch_size
_UpperCamelCase : str = seq_length
_UpperCamelCase : str = is_training
_UpperCamelCase : Any = use_labels
_UpperCamelCase : Any = vocab_size
_UpperCamelCase : List[str] = hidden_size
_UpperCamelCase : Optional[Any] = num_hidden_layers
_UpperCamelCase : Dict = num_attention_heads
_UpperCamelCase : Optional[Any] = intermediate_size
_UpperCamelCase : int = hidden_dropout_prob
_UpperCamelCase : Dict = attention_probs_dropout_prob
_UpperCamelCase : str = max_position_embeddings
_UpperCamelCase : int = eos_token_id
_UpperCamelCase : Dict = pad_token_id
_UpperCamelCase : Optional[Any] = bos_token_id
_UpperCamelCase : str = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
_UpperCamelCase : List[str] = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
_UpperCamelCase : int = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def __SCREAMING_SNAKE_CASE ( self : int ) -> str:
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCamelCase : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCamelCase : Tuple = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase : List[Any] = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
_UpperCamelCase : Dict = prepare_led_inputs_dict(__a , __a , __a )
_UpperCamelCase : Union[str, Any] = tf.concat(
[tf.zeros_like(__a )[:, :-1], tf.ones_like(__a )[:, -1:]] , axis=-1 , )
_UpperCamelCase : Union[str, Any] = global_attention_mask
return config, inputs_dict
def __SCREAMING_SNAKE_CASE ( self : List[str] , __a : List[Any] , __a : int ) -> Tuple:
_UpperCamelCase : Tuple = TFLEDModel(config=__a ).get_decoder()
_UpperCamelCase : Tuple = inputs_dict["input_ids"]
_UpperCamelCase : int = input_ids[:1, :]
_UpperCamelCase : List[str] = inputs_dict["attention_mask"][:1, :]
_UpperCamelCase : List[Any] = 1
# first forward pass
_UpperCamelCase : Any = model(__a , attention_mask=__a , use_cache=__a )
_UpperCamelCase, _UpperCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCamelCase : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCamelCase : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCamelCase : List[str] = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCamelCase : Union[str, Any] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCamelCase : Tuple = model(__a , attention_mask=__a )[0]
_UpperCamelCase : int = model(__a , attention_mask=__a , past_key_values=__a )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCamelCase : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCamelCase : List[str] = output_from_no_past[:, -3:, random_slice_idx]
_UpperCamelCase : Optional[int] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__a , __a , rtol=1e-3 )
def lowercase__ ( lowercase_ ,lowercase_ ,lowercase_ ,lowercase_=None ,lowercase_=None ,lowercase_=None ,lowercase_=None ,) -> Dict:
"""simple docstring"""
if attention_mask is None:
_UpperCamelCase : str = tf.cast(tf.math.not_equal(lowercase_ ,config.pad_token_id ) ,tf.inta )
if decoder_attention_mask is None:
_UpperCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape ,dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] ,config.pad_token_id ) ,tf.inta ),
] ,axis=-1 ,)
if head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_UpperCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :Any = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ :List[str] = (
{
"conversational": TFLEDForConditionalGeneration,
"feature-extraction": TFLEDModel,
"summarization": TFLEDForConditionalGeneration,
"text2text-generation": TFLEDForConditionalGeneration,
"translation": TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ :Tuple = True
SCREAMING_SNAKE_CASE__ :str = False
SCREAMING_SNAKE_CASE__ :Optional[Any] = False
SCREAMING_SNAKE_CASE__ :int = False
def __SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
_UpperCamelCase : int = TFLEDModelTester(self )
_UpperCamelCase : Any = ConfigTester(self , config_class=__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
self.config_tester.run_common_tests()
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
_UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__a )
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
_UpperCamelCase, _UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCamelCase : Optional[int] = tf.zeros_like(inputs_dict["attention_mask"] )
_UpperCamelCase : Union[str, Any] = 2
_UpperCamelCase : str = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["global_attention_mask"] , )
_UpperCamelCase : Dict = True
_UpperCamelCase : str = self.model_tester.seq_length
_UpperCamelCase : Union[str, Any] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__a : Optional[int] ):
_UpperCamelCase : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__a : Optional[Any] ):
_UpperCamelCase : Union[str, Any] = [t.numpy() for t in outputs.encoder_attentions]
_UpperCamelCase : List[Any] = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
_UpperCamelCase : Dict = True
_UpperCamelCase : Optional[Any] = False
_UpperCamelCase : int = False
_UpperCamelCase : Optional[int] = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
_UpperCamelCase : Any = len(__a )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
if self.is_encoder_decoder:
_UpperCamelCase : Optional[Any] = model_class(__a )
_UpperCamelCase : List[Any] = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_decoder_attentions_output(__a )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCamelCase : int = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : str = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
# Check attention is always last and order is fine
_UpperCamelCase : Any = True
_UpperCamelCase : List[str] = True
_UpperCamelCase : Tuple = model_class(__a )
_UpperCamelCase : int = model(self._prepare_for_class(__a , __a ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__a ) )
self.assertEqual(model.config.output_hidden_states , __a )
check_encoder_attentions_output(__a )
@unittest.skip("LED keeps using potentially symbolic tensors in conditionals and breaks tracing." )
def __SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
pass
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
# TODO: Head-masking not yet implement
pass
def lowercase__ ( lowercase_ ) -> Union[str, Any]:
"""simple docstring"""
return tf.constant(lowercase_ ,dtype=tf.intaa )
lowerCamelCase__ = 1E-4
@slow
@require_tf
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase : Any = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" ).led
# change to intended input here
_UpperCamelCase : int = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Optional[int] = model(**__a )[0]
_UpperCamelCase : Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Tuple = tf.convert_to_tensor(
[[2.30_50, 2.82_79, 0.65_31], [-1.84_57, -0.14_55, -3.56_61], [-1.01_86, 0.45_86, -2.20_43]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 )
def __SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
_UpperCamelCase : Optional[int] = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384" )
# change to intended input here
_UpperCamelCase : Optional[int] = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : List[str] = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
_UpperCamelCase : Optional[Any] = prepare_led_inputs_dict(model.config , __a , __a )
_UpperCamelCase : Union[str, Any] = model(**__a )[0]
_UpperCamelCase : int = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , __a )
# change to expected output here
_UpperCamelCase : Optional[int] = tf.convert_to_tensor(
[[33.65_07, 6.45_72, 16.80_89], [5.87_39, -2.42_38, 11.29_02], [-3.21_39, -4.31_49, 4.27_83]] , )
tf.debugging.assert_near(output[:, :3, :3] , __a , atol=1e-3 , rtol=1e-3 )
| 51
|
"""simple docstring"""
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("KEY")
lowerCamelCase__ = TypeVar("VAL")
@dataclass(frozen=_UpperCamelCase , slots=_UpperCamelCase )
class __SCREAMING_SNAKE_CASE ( Generic[KEY, VAL] ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :KEY
SCREAMING_SNAKE_CASE__ :VAL
class __SCREAMING_SNAKE_CASE ( _Item ):
'''simple docstring'''
def __init__( self : List[str] ) -> None:
super().__init__(__a , __a )
def __bool__( self : Dict ) -> bool:
return False
lowerCamelCase__ = _DeletedItem()
class __SCREAMING_SNAKE_CASE ( MutableMapping[KEY, VAL] ):
'''simple docstring'''
def __init__( self : int , __a : int = 8 , __a : float = 0.75 ) -> None:
_UpperCamelCase : str = initial_block_size
_UpperCamelCase : list[_Item | None] = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
_UpperCamelCase : List[str] = capacity_factor
_UpperCamelCase : Dict = 0
def __SCREAMING_SNAKE_CASE ( self : int , __a : KEY ) -> int:
return hash(__a ) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : int ) -> int:
return (ind + 1) % len(self._buckets )
def __SCREAMING_SNAKE_CASE ( self : Tuple , __a : int , __a : KEY , __a : VAL ) -> bool:
_UpperCamelCase : List[Any] = self._buckets[ind]
if not stored:
_UpperCamelCase : Tuple = _Item(__a , __a )
self._len += 1
return True
elif stored.key == key:
_UpperCamelCase : Union[str, Any] = _Item(__a , __a )
return True
else:
return False
def __SCREAMING_SNAKE_CASE ( self : List[str] ) -> bool:
_UpperCamelCase : Any = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(__a )
def __SCREAMING_SNAKE_CASE ( self : str ) -> bool:
if len(self._buckets ) <= self._initial_block_size:
return False
_UpperCamelCase : List[str] = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : int ) -> None:
_UpperCamelCase : Any = self._buckets
_UpperCamelCase : List[Any] = [None] * new_size
_UpperCamelCase : List[str] = 0
for item in old_buckets:
if item:
self._add_item(item.key , item.val )
def __SCREAMING_SNAKE_CASE ( self : int ) -> None:
self._resize(len(self._buckets ) * 2 )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> None:
self._resize(len(self._buckets ) // 2 )
def __SCREAMING_SNAKE_CASE ( self : List[Any] , __a : KEY ) -> Iterator[int]:
_UpperCamelCase : str = self._get_bucket_index(__a )
for _ in range(len(self._buckets ) ):
yield ind
_UpperCamelCase : Tuple = self._get_next_ind(__a )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __a : KEY , __a : VAL ) -> None:
for ind in self._iterate_buckets(__a ):
if self._try_set(__a , __a , __a ):
break
def __setitem__( self : int , __a : KEY , __a : VAL ) -> None:
if self._is_full():
self._size_up()
self._add_item(__a , __a )
def __delitem__( self : str , __a : KEY ) -> None:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
raise KeyError(__a )
if item is _deleted:
continue
if item.key == key:
_UpperCamelCase : List[Any] = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : str , __a : KEY ) -> VAL:
for ind in self._iterate_buckets(__a ):
_UpperCamelCase : Tuple = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(__a )
def __len__( self : List[Any] ) -> int:
return self._len
def __iter__( self : List[str] ) -> Iterator[KEY]:
yield from (item.key for item in self._buckets if item)
def __repr__( self : List[str] ) -> str:
_UpperCamelCase : Optional[int] = " ,".join(
F'''{item.key}: {item.val}''' for item in self._buckets if item )
return F'''HashMap({val_string})'''
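# Usage sketch for the open-addressing map above (it is referred to as HashMap here purely for
# readability, because the generated class name in this dump is mangled):
# hm = HashMap()
# for i in range(10):
#     hm[f"key-{i}"] = i          # insertion triggers _size_up() once the capacity factor is exceeded
# assert hm["key-3"] == 3 and len(hm) == 10
# del hm["key-3"]                 # marks the slot with the _deleted sentinel
# assert "key-3" not in list(hm)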
| 51
| 1
|
"""simple docstring"""
def a_ ( numbers ):
    # Maximum product of a contiguous subarray: track both the running maximum and
    # minimum products, since a negative number can turn the minimum into the new maximum.
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple) ) or not all(
        isinstance(number, int ) for number in numbers ):
        raise ValueError("""numbers must be an iterable of integers""" )
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers ) ):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number )
        min_till_now = min(number, min_till_now * number )
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now )
    return max_prod
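if __name__ == "__main__":
    # Illustrative checks (a sketch; the expected values follow the standard
    # maximum-product-subarray algorithm implemented above):
    assert a_([2, 3, -2, 4]) == 6  # best contiguous subarray is [2, 3]
    assert a_([-2, 0, -1]) == 0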
| 281
|
def _a ( num ):
    # Check whether a non-negative integer is a palindrome by reversing its digits.
    if num < 0:
        return False
    num_copy : int = num
    rev_num : int = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
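# Illustrative values (a sketch): _a(121) is True, _a(-121) is False because negative
# numbers are rejected, and _a(10) is False because the reversed digits give 1.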
if __name__ == "__main__":
import doctest
doctest.testmod()
| 681
| 0
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class _UpperCAmelCase ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ :Dict = pad_token_id
lowerCAmelCase__ :Any = max_length
lowerCAmelCase__ :Optional[Any] = vocab
lowerCAmelCase__ :Optional[int] = merges
lowerCAmelCase__ :Dict = BytePairTokenizer(_lowerCAmelCase , _lowerCAmelCase , sequence_length=_lowerCAmelCase )
@classmethod
def snake_case_ ( cls , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :int = [" ".join(m ) for m in tokenizer.bpe_ranks.keys()]
lowerCAmelCase__ :Dict = tokenizer.get_vocab()
return cls(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def snake_case_ ( cls , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = GPTaTokenizer.from_pretrained(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
return cls.from_tokenizer(_lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
@classmethod
def snake_case_ ( cls , _lowerCAmelCase ):
'''simple docstring'''
return cls(**_lowerCAmelCase )
def snake_case_ ( self ):
'''simple docstring'''
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.tf_tokenizer(_lowerCAmelCase )
lowerCAmelCase__ :str = tf.ones_like(_lowerCAmelCase )
if self.pad_token_id is not None:
# pad the tokens up to max length
lowerCAmelCase__ :Optional[Any] = max_length if max_length is not None else self.max_length
if max_length is not None:
lowerCAmelCase__ ,lowerCAmelCase__ :int = pad_model_inputs(
_lowerCAmelCase , max_seq_length=_lowerCAmelCase , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 111
|
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _UpperCAmelCase ( _A , unittest.TestCase ):
"""simple docstring"""
A = KandinskyVaaPipeline
A = [
'''image_embeds''',
'''negative_image_embeds''',
]
A = ['''image_embeds''', '''negative_image_embeds''']
A = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
A = False
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return 32
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim
@property
def snake_case_ ( self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def snake_case_ ( self ):
'''simple docstring'''
return 100
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :List[str] = {
"in_channels": 4,
# Out channels is double the in channels because the model predicts both mean and variance
"out_channels": 8,
"addition_embed_type": "image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
lowerCAmelCase__ :List[str] = UNetaDConditionModel(**_lowerCAmelCase )
return model
@property
def snake_case_ ( self ):
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def snake_case_ ( self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCAmelCase__ :Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = self.dummy_unet
lowerCAmelCase__ :List[str] = self.dummy_movq
lowerCAmelCase__ :List[Any] = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=_lowerCAmelCase , )
lowerCAmelCase__ :Optional[Any] = {
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def snake_case_ ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
'''simple docstring'''
lowerCAmelCase__ :List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_lowerCAmelCase ) ).to(_lowerCAmelCase )
lowerCAmelCase__ :Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_lowerCAmelCase )
if str(_lowerCAmelCase ).startswith("mps" ):
lowerCAmelCase__ :List[Any] = torch.manual_seed(_lowerCAmelCase )
else:
lowerCAmelCase__ :Optional[int] = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = {
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"guidance_scale": 4.0,
"num_inference_steps": 2,
"output_type": "np",
}
return inputs
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = "cpu"
lowerCAmelCase__ :Dict = self.get_dummy_components()
lowerCAmelCase__ :Tuple = self.pipeline_class(**_lowerCAmelCase )
lowerCAmelCase__ :List[Any] = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :str = pipe(**self.get_dummy_inputs(_lowerCAmelCase ) )
lowerCAmelCase__ :Dict = output.images
lowerCAmelCase__ :str = pipe(
**self.get_dummy_inputs(_lowerCAmelCase ) , return_dict=_lowerCAmelCase , )[0]
lowerCAmelCase__ :Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ :List[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ :Tuple = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
lowerCAmelCase__ :str = KandinskyVaaPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa )
pipe_prior.to(_lowerCAmelCase )
lowerCAmelCase__ :str = KandinskyVaaPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.floataa )
lowerCAmelCase__ :Union[str, Any] = pipeline.to(_lowerCAmelCase )
pipeline.set_progress_bar_config(disable=_lowerCAmelCase )
lowerCAmelCase__ :Optional[int] = "red cat, 4k photo"
lowerCAmelCase__ :Tuple = torch.Generator(device="cuda" ).manual_seed(0 )
lowerCAmelCase__ ,lowerCAmelCase__ :Dict = pipe_prior(
_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
lowerCAmelCase__ :List[Any] = torch.Generator(device="cuda" ).manual_seed(0 )
lowerCAmelCase__ :List[str] = pipeline(
image_embeds=_lowerCAmelCase , negative_image_embeds=_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=100 , output_type="np" , )
lowerCAmelCase__ :int = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_lowerCAmelCase , _lowerCAmelCase )
| 111
| 1
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
SCREAMING_SNAKE_CASE__ = False
@skip_mps
class _UpperCamelCase( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Dict = StableDiffusionAttendAndExcitePipeline
__SCREAMING_SNAKE_CASE : Tuple = False
__SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
__SCREAMING_SNAKE_CASE : List[str] = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def __lowerCAmelCase ( cls : List[Any] ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCAmelCase ( cls : Optional[int] ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
__a : Tuple = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=SCREAMING_SNAKE_CASE__ , )
__a : List[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , )
torch.manual_seed(0 )
__a : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
__a : Optional[Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=5_1_2 , )
__a : Tuple = CLIPTextModel(SCREAMING_SNAKE_CASE__ )
__a : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : Any = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __lowerCAmelCase ( self : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str=0 ):
'''simple docstring'''
if str(SCREAMING_SNAKE_CASE__ ).startswith('mps' ):
__a : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__a : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__a : List[str] = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
__a : Tuple = 'cpu'
__a : Optional[int] = self.get_dummy_components()
__a : Union[str, Any] = self.pipeline_class(**SCREAMING_SNAKE_CASE__ )
pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__a : int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__a : int = pipe(**SCREAMING_SNAKE_CASE__ ).images
__a : Optional[int] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 6_4, 6_4, 3) )
__a : Optional[Any] = np.array(
[0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] )
__a : Any = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(SCREAMING_SNAKE_CASE__ , 1e-3 )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def __lowerCAmelCase ( self : str ):
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def __lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
super().test_save_load_local(expected_max_difference=5e-4 )
def __lowerCAmelCase ( self : Dict ):
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _UpperCamelCase( unittest.TestCase ):
@classmethod
def __lowerCAmelCase ( cls : str ):
'''simple docstring'''
super().setUpClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
@classmethod
def __lowerCAmelCase ( cls : Tuple ):
'''simple docstring'''
super().tearDownClass()
torch.use_deterministic_algorithms(SCREAMING_SNAKE_CASE__ )
def __lowerCAmelCase ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : int ):
'''simple docstring'''
__a : List[str] = torch.manual_seed(5_1 )
__a : Optional[int] = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , safety_checker=SCREAMING_SNAKE_CASE__ , torch_dtype=torch.floataa )
pipe.to('cuda' )
__a : List[str] = 'a painting of an elephant with glasses'
__a : Any = [5, 7]
__a : Tuple = pipe(
prompt=SCREAMING_SNAKE_CASE__ , token_indices=SCREAMING_SNAKE_CASE__ , guidance_scale=7.5 , generator=SCREAMING_SNAKE_CASE__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
__a : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5e-1
| 47
|
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Any = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
__lowerCamelCase : Any = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_ ) -> Optional[int]:
A__ : Dict ={
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
A__ : Optional[Any] =int(re.match(R'''.*layer_(\d*).*''', snake_case_ )[1] )
layer_number -= 3
return f'h.{layer_number}.' + key
def get_dtype_size( dtype ):
    # Number of bytes used per element of `dtype` (bools are counted as one bit here).
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(R'''[^\d](\d+)$''', str(dtype ) )
    if bit_search is None:
        raise ValueError(f'`dtype` is not a valid dtype: {dtype}.' )
    bit_size = int(bit_search.groups()[0] )
    return bit_size // 8
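# Worked example (illustrative): str(torch.float16) ends in "16", so the regex above
# captures 16 bits and get_dtype_size(torch.float16) returns 16 // 8 = 2 bytes per
# element; torch.bool is treated as a single bit and returns 1 / 8.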
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_, snake_case_, snake_case_ ) -> Any:
# Construct model
if bloom_config_file == "":
A__ : Optional[int] =BloomConfig()
else:
A__ : Union[str, Any] =BloomConfig.from_json_file(snake_case_ )
if shard_model:
A__ : Any =os.listdir(snake_case_ )
A__ : Dict =sorted(filter(lambda snake_case_ : s.startswith('''layer''' ) and "model_00" in s, snake_case_ ) )
A__ : List[str] ={'''weight_map''': {}, '''metadata''': {}}
A__ : Optional[int] =0
A__ : Tuple =None
A__ : Dict =BloomConfig()
for j, file in enumerate(snake_case_ ):
print('''Processing file: {}'''.format(snake_case_ ) )
A__ : List[Any] =None
for i in range(snake_case_ ):
# load all TP files
A__ : str =file.replace('''model_00''', f'model_0{i}' )
A__ : int =torch.load(os.path.join(snake_case_, snake_case_ ), map_location='''cpu''' )
# Rename keys in the transformers names
A__ : int =list(temp.keys() )
for key in keys:
A__ : Union[str, Any] =temp.pop(snake_case_ )
if tensors is None:
A__ : Any =temp
else:
for key in tensors.keys():
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
A__ : List[str] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
A__ : str =torch.cat([tensors[key], temp[key]], dim=snake_case_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
A__ : Tuple =tensors[key] / pretraining_tp
torch.save(
snake_case_, os.path.join(
snake_case_, '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ), str(len(snake_case_ ) ).zfill(5 ) ), ), )
for key in tensors.keys():
A__ : List[str] =tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
A__ : List[str] ='''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ), str(len(snake_case_ ) ).zfill(5 ) )
A__ : Any =BloomConfig()
A__ : Optional[Any] =pytorch_dump_folder_path + '''/''' + CONFIG_NAME
A__ : Union[str, Any] =total_size
with open(snake_case_, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(snake_case_, WEIGHTS_NAME + '''.index.json''' ), '''w''', encoding='''utf-8''' ) as f:
A__ : str =json.dumps(snake_case_, indent=2, sort_keys=snake_case_ ) + '''\n'''
f.write(snake_case_ )
else:
A__ : Tuple =BloomModel(snake_case_ )
A__ : Optional[Any] =os.listdir(snake_case_ )
A__ : Union[str, Any] =sorted(filter(lambda snake_case_ : s.startswith('''layer''' ) and "model_00" in s, snake_case_ ) )
A__ : Union[str, Any] =None
for i, file in enumerate(snake_case_ ):
A__ : int =None
for i in range(snake_case_ ):
# load all TP files
A__ : List[str] =file.replace('''model_00''', f'model_0{i}' )
A__ : Optional[int] =torch.load(os.path.join(snake_case_, snake_case_ ), map_location='''cpu''' )
# Rename keys in the transformers names
A__ : Union[str, Any] =list(temp.keys() )
for key in keys:
A__ : Union[str, Any] =temp.pop(snake_case_ )
if tensors is None:
A__ : Union[str, Any] =temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
A__ : List[Any] =1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights across TP ranks
A__ : Dict =torch.cat([tensors[key], temp[key]], dim=snake_case_ )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(snake_case_ ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
A__ : Tuple =tensors[key] / pretraining_tp
A__ : int =model.load_state_dict(snake_case_, strict=snake_case_ )
assert not other_keys.unexpected_keys, f'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
A__ : List[Any] =set(other_keys.missing_keys )
else:
A__ : Tuple =missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(snake_case_, exist_ok=snake_case_ )
A__ : Dict =pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
A__ : Optional[int] =pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(f'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
A__ : int =model.to(config.torch_dtype )
torch.save(model.state_dict(), snake_case_ )
print(f'Save configuration file to {pytorch_config_dump_path}' )
with open(snake_case_, '''w''', encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
__lowerCamelCase : Tuple = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
| 416
| 0
|
'''simple docstring'''
from maths.prime_factors import prime_factors
def __lowerCamelCase ( number : int ) -> int:
    # Liouville lambda: -1 if `number` has an odd count of prime factors, else 1.
    if not isinstance(number , int ):
        msg = F'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 1:
        raise ValueError("""Input must be a positive integer""" )
    return -1 if len(prime_factors(number ) ) % 2 else 1
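# Illustrative values (a sketch, assuming prime_factors returns factors with
# multiplicity): __lowerCamelCase(4) == 1 since 4 = 2 * 2 has an even count of prime
# factors, while __lowerCamelCase(12) == -1 since 12 = 2 * 2 * 3 has an odd count.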
if __name__ == "__main__":
import doctest
doctest.testmod()
| 517
|
'''simple docstring'''
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase ( self : List[str] )-> Optional[int]:
snake_case = inspect.getfile(accelerate.test_utils )
snake_case = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
snake_case = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def lowerCAmelCase ( self : int )-> List[str]:
snake_case = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
snake_case = [sys.executable] + distributed_args
execute_subprocess_async(__snake_case , env=os.environ.copy() )
| 517
| 1
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __UpperCamelCase ( self ):
snake_case__ : List[str] = inspect.getfile(accelerate.test_utils )
snake_case__ : Any = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_script.py"""] )
snake_case__ : List[str] = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_distributed_data_loop.py"""] )
snake_case__ : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["""scripts""", """test_ops.py"""] )
@require_multi_gpu
def __UpperCamelCase ( self ):
print(f"Found {torch.cuda.device_count()} devices." )
snake_case__ : List[Any] = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self ):
print(f"Found {torch.cuda.device_count()} devices." )
snake_case__ : Union[str, Any] = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
print(f"Command: {cmd}" )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self ):
snake_case__ : Tuple = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
@require_multi_gpu
def __UpperCamelCase ( self ):
print(f"Found {torch.cuda.device_count()} devices, using 2 devices only" )
snake_case__ : Optional[Any] = ["""torchrun""", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="""0,1""" ):
execute_subprocess_async(__SCREAMING_SNAKE_CASE , env=os.environ.copy() )
if __name__ == "__main__":
A_ : Union[str, Any] = Accelerator()
A_ : List[Any] = (accelerator.state.process_index + 2, 10)
A_ : Tuple = torch.randint(0, 10, shape).to(accelerator.device)
A_ : List[Any] = ""
A_ : Any = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
A_ : Any = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += F"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
A_ : Optional[Any] = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 38
|
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"encoder.layer_norm_for_extract": "layer_norm_for_extract",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"label_embs_concat": "label_embeddings_concat",
"mask_emb": "masked_spec_embed",
"spk_proj": "speaker_proj",
}
SCREAMING_SNAKE_CASE = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"label_embeddings_concat",
"speaker_proj",
"layer_norm_for_extract",
]
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
for attribute in key.split("." ):
UpperCAmelCase_ = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if weight_type is not None:
UpperCAmelCase_ = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).shape
else:
UpperCAmelCase_ = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
UpperCAmelCase_ = value
elif weight_type == "weight_g":
UpperCAmelCase_ = value
elif weight_type == "weight_v":
UpperCAmelCase_ = value
elif weight_type == "bias":
UpperCAmelCase_ = value
else:
UpperCAmelCase_ = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ = []
UpperCAmelCase_ = fairseq_model.state_dict()
UpperCAmelCase_ = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
UpperCAmelCase_ = False
if "conv_layers" in name:
load_conv_layer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == "group" , )
UpperCAmelCase_ = True
else:
for key, mapped_key in MAPPING.items():
UpperCAmelCase_ = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
UpperCAmelCase_ = True
if "*" in mapped_key:
UpperCAmelCase_ = name.split(__SCREAMING_SNAKE_CASE )[0].split("." )[-2]
UpperCAmelCase_ = mapped_key.replace("*" , __SCREAMING_SNAKE_CASE )
if "weight_g" in name:
UpperCAmelCase_ = "weight_g"
elif "weight_v" in name:
UpperCAmelCase_ = "weight_v"
elif "bias" in name:
UpperCAmelCase_ = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
UpperCAmelCase_ = "weight"
else:
UpperCAmelCase_ = None
set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(__SCREAMING_SNAKE_CASE )
logger.warning(f'''Unused weights: {unused_weights}''' )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ = full_name.split("conv_layers." )[-1]
UpperCAmelCase_ = name.split("." )
UpperCAmelCase_ = int(items[0] )
UpperCAmelCase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
UpperCAmelCase_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
UpperCAmelCase_ = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
UpperCAmelCase_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
UpperCAmelCase_ = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=True ) -> Optional[Any]:
if config_path is not None:
UpperCAmelCase_ = UniSpeechSatConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = UniSpeechSatConfig()
UpperCAmelCase_ = ""
if is_finetuned:
UpperCAmelCase_ = UniSpeechSatForCTC(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = UniSpeechSatForPreTraining(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
UpperCAmelCase_ = model[0].eval()
recursively_load_weights(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
hf_wavavec.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 579
| 0
|
import argparse
import os
import torch
from transformers import (
XLNetConfig,
XLNetForQuestionAnswering,
XLNetForSequenceClassification,
XLNetLMHeadModel,
load_tf_weights_in_xlnet,
)
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
_SCREAMING_SNAKE_CASE = {
"""cola""": 2,
"""mnli""": 3,
"""mrpc""": 2,
"""sst-2""": 2,
"""sts-b""": 1,
"""qqp""": 2,
"""qnli""": 2,
"""rte""": 2,
"""wnli""": 2,
}
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( __a , __a , __a , __a=None ):
# Initialise PyTorch model
snake_case_ : Dict = XLNetConfig.from_json_file(__a )
snake_case_ : List[Any] = finetuning_task.lower() if finetuning_task is not None else ''
if finetuning_task in GLUE_TASKS_NUM_LABELS:
print(f"""Building PyTorch XLNetForSequenceClassification model from configuration: {config}""" )
snake_case_ : List[str] = finetuning_task
snake_case_ : Optional[Any] = GLUE_TASKS_NUM_LABELS[finetuning_task]
snake_case_ : List[Any] = XLNetForSequenceClassification(__a )
elif "squad" in finetuning_task:
snake_case_ : Optional[int] = finetuning_task
snake_case_ : Union[str, Any] = XLNetForQuestionAnswering(__a )
else:
snake_case_ : List[str] = XLNetLMHeadModel(__a )
# Load weights from tf checkpoint
load_tf_weights_in_xlnet(__a , __a , __a )
# Save pytorch-model
snake_case_ : Tuple = os.path.join(__a , __a )
snake_case_ : Union[str, Any] = os.path.join(__a , __a )
print(f"""Save PyTorch model to {os.path.abspath(__a )}""" )
torch.save(model.state_dict() , __a )
print(f"""Save configuration file to {os.path.abspath(__a )}""" )
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--xlnet_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained XLNet model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--finetuning_task""",
default=None,
type=str,
help="""Name of a task on which the XLNet TensorFlow model was fine-tuned""",
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
print(args)
convert_xlnet_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.xlnet_config_file, args.pytorch_dump_folder_path, args.finetuning_task
)
| 534
|
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
# Load checkpoint
snake_case_ : Union[str, Any] = torch.load(__a , map_location='cpu' )
snake_case_ : Union[str, Any] = chkpt['model']
# We have the base model one level deeper than the original XLM repository
snake_case_ : str = {}
for k, v in state_dict.items():
if "pred_layer" in k:
snake_case_ : Tuple = v
else:
snake_case_ : Dict = v
snake_case_ : Tuple = chkpt['params']
snake_case_ : List[Any] = {n: v for n, v in config.items() if not isinstance(__a , (torch.FloatTensor, numpy.ndarray) )}
snake_case_ : Optional[int] = chkpt['dico_word2id']
snake_case_ : List[str] = {s + '</w>' if s.find('@@' ) == -1 and i > 13 else s.replace('@@' , '' ): i for s, i in vocab.items()}
# Save pytorch-model
snake_case_ : List[str] = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
snake_case_ : Dict = pytorch_dump_folder_path + '/' + CONFIG_NAME
snake_case_ : Any = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['vocab_file']
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(__a , __a )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__a , indent=2 ) + '\n' )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(__a , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(__a , indent=2 ) + '\n' )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--xlm_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
_SCREAMING_SNAKE_CASE = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 534
| 1
|
'''simple docstring'''
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class A ( _a ):
lowercase_ = 'Wav2Vec2FeatureExtractor'
lowercase_ = 'AutoTokenizer'
def __init__( self : Tuple , lowerCAmelCase_ : Dict , lowerCAmelCase_ : List[str] ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(lowerCAmelCase_ , lowerCAmelCase_ )
_a = self.feature_extractor
_a = False
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , lowerCAmelCase_ : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> Tuple:
"""simple docstring"""
try:
return super().from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
''' include a `tokenizer_class` attribute is deprecated and will be '''
'''removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'''
''' attribute to either your `config.json` or `tokenizer_config.json` '''
'''file to suppress this warning: ''' , lowerCAmelCase_ , )
_a = WavaVecaFeatureExtractor.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
_a = WavaVecaCTCTokenizer.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
return cls(feature_extractor=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
def __call__( self : str , *lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Any ) -> Optional[int]:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*lowerCAmelCase_ , **lowerCAmelCase_ )
if "raw_speech" in kwargs:
warnings.warn('''Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.''' )
_a = kwargs.pop('''raw_speech''' )
else:
_a = kwargs.pop('''audio''' , lowerCAmelCase_ )
_a = kwargs.pop('''sampling_rate''' , lowerCAmelCase_ )
_a = kwargs.pop('''text''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_a = args[0]
_a = args[1:]
if audio is None and text is None:
raise ValueError('''You need to specify either an `audio` or `text` input to process.''' )
if audio is not None:
_a = self.feature_extractor(lowerCAmelCase_ , *lowerCAmelCase_ , sampling_rate=lowerCAmelCase_ , **lowerCAmelCase_ )
if text is not None:
_a = self.tokenizer(lowerCAmelCase_ , **lowerCAmelCase_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
_a = encodings['''input_ids''']
return inputs
def __lowerCAmelCase ( self : str , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : Any ) -> Any:
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*lowerCAmelCase_ , **lowerCAmelCase_ )
_a = kwargs.pop('''input_features''' , lowerCAmelCase_ )
_a = kwargs.pop('''labels''' , lowerCAmelCase_ )
if len(lowerCAmelCase_ ) > 0:
_a = args[0]
_a = args[1:]
if input_features is not None:
_a = self.feature_extractor.pad(lowerCAmelCase_ , *lowerCAmelCase_ , **lowerCAmelCase_ )
if labels is not None:
_a = self.tokenizer.pad(lowerCAmelCase_ , **lowerCAmelCase_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
_a = labels['''input_ids''']
return input_features
def __lowerCAmelCase ( self : Optional[int] , *lowerCAmelCase_ : List[str] , **lowerCAmelCase_ : List[str] ) -> Optional[Any]:
"""simple docstring"""
return self.tokenizer.batch_decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
def __lowerCAmelCase ( self : Tuple , *lowerCAmelCase_ : Tuple , **lowerCAmelCase_ : List[Any] ) -> List[str]:
"""simple docstring"""
return self.tokenizer.decode(*lowerCAmelCase_ , **lowerCAmelCase_ )
@contextmanager
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
warnings.warn(
'''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
'''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
'''your audio inputs, or in a separate call.''' )
_a = True
_a = self.tokenizer
yield
_a = self.feature_extractor
_a = False
| 22
|
def SCREAMING_SNAKE_CASE_ ( files : list ) -> float:
    # Optimal merge pattern: repeatedly merge the two smallest files and accumulate
    # the cost of each merge.
    optimal_merge_cost = 0
    while len(files ) > 1:
        temp = 0
        # Consider two files with minimum cost to be merged
        for _ in range(2 ):
            min_index = files.index(min(files ) )
            temp += files[min_index]
            files.pop(min_index )
        files.append(temp )
        optimal_merge_cost += temp
    return optimal_merge_cost
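# Worked example (a sketch): for file sizes [2, 3, 4] the two smallest files (2 and 3)
# merge first at cost 5, then 5 and 4 merge at cost 9, so the returned total is 14.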
if __name__ == "__main__":
import doctest
doctest.testmod()
| 443
| 0
|
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ):
# Initialise PyTorch model
__SCREAMING_SNAKE_CASE : Optional[int] = AlbertConfig.from_json_file(lowercase__ )
print(F'''Building PyTorch model from configuration: {config}''' )
__SCREAMING_SNAKE_CASE : int = AlbertForPreTraining(lowercase__ )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowercase__ , lowercase__ , lowercase__ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowercase__ )
if __name__ == "__main__":
__lowerCAmelCase : str =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--albert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained ALBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__lowerCAmelCase : Any =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 704
|
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class _lowercase ( unittest.TestCase ):
'''simple docstring'''
def __magic_name__( self :Union[str, Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :int ) -> Optional[int]:
return f'''gaussian_noise_s={seed}_shape={'_'.join([str(s ) for s in shape] )}.npy'''
def __magic_name__( self :List[str] ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :Any=(4, 4, 64, 64) , lowerCAmelCase__ :List[Any]=False ) -> List[str]:
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Optional[int] = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) , dtype=lowerCAmelCase__ )
return image
def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :int="CompVis/stable-diffusion-v1-4" ) -> Tuple:
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Optional[int] = '''bf16''' if fpaa else None
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Any = FlaxUNetaDConditionModel.from_pretrained(
lowerCAmelCase__ , subfolder='''unet''' , dtype=lowerCAmelCase__ , revision=lowerCAmelCase__ )
return model, params
def __magic_name__( self :Any , lowerCAmelCase__ :str=0 , lowerCAmelCase__ :Optional[int]=(4, 77, 768) , lowerCAmelCase__ :str=False ) -> Optional[Any]:
__SCREAMING_SNAKE_CASE : str = jnp.bfloataa if fpaa else jnp.floataa
__SCREAMING_SNAKE_CASE : Any = jnp.array(load_hf_numpy(self.get_file_format(lowerCAmelCase__ , lowerCAmelCase__ ) ) , dtype=lowerCAmelCase__ )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1_000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
] )
def __magic_name__( self :str , lowerCAmelCase__ :str , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[int] ) -> Union[str, Any]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : int = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Dict = self.get_latents(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : int = self.get_encoder_hidden_states(lowerCAmelCase__ , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : List[str] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : int = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : Tuple = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1_000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
] )
def __magic_name__( self :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :str ) -> str:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_latents(lowerCAmelCase__ , shape=(4, 4, 96, 96) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Any = self.get_encoder_hidden_states(lowerCAmelCase__ , shape=(4, 77, 1_024) , fpaa=lowerCAmelCase__ )
__SCREAMING_SNAKE_CASE : Optional[Any] = model.apply(
{'''params''': params} , lowerCAmelCase__ , jnp.array(lowerCAmelCase__ , dtype=jnp.intaa ) , encoder_hidden_states=lowerCAmelCase__ , ).sample
assert sample.shape == latents.shape
__SCREAMING_SNAKE_CASE : Optional[Any] = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE : str = jnp.array(lowerCAmelCase__ , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-2 )
| 260
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class _UpperCamelCase ( __snake_case ):
"""simple docstring"""
lowerCAmelCase = 42
@flax_register_to_config
class _UpperCamelCase ( nn.Module , __snake_case , __snake_case ):
"""simple docstring"""
lowerCAmelCase = 3_2
lowerCAmelCase = 4
lowerCAmelCase = 4
lowerCAmelCase = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowerCAmelCase = False
lowerCAmelCase = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0)
lowerCAmelCase = 2
lowerCAmelCase = 8
lowerCAmelCase = None
lowerCAmelCase = 1_2_8_0
lowerCAmelCase = 0.0
lowerCAmelCase = False
lowerCAmelCase = jnp.floataa
lowerCAmelCase = True
lowerCAmelCase = 0
lowerCAmelCase = False
def _UpperCAmelCase ( self , a__ ) -> FrozenDict:
# init input tensors
A = (1, self.in_channels, self.sample_size, self.sample_size)
A = jnp.zeros(a__ , dtype=jnp.floataa )
A = jnp.ones((1,) , dtype=jnp.intaa )
A = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
A , A = jax.random.split(a__ )
A = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(a__ , a__ , a__ , a__ )["params"]
def _UpperCAmelCase ( self ) -> str:
A = self.block_out_channels
A = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
A = self.num_attention_heads or self.attention_head_dim
# input
A = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
A = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
A = FlaxTimestepEmbedding(a__ , dtype=self.dtype )
A = self.only_cross_attention
if isinstance(a__ , a__ ):
A = (only_cross_attention,) * len(self.down_block_types )
if isinstance(a__ , a__ ):
A = (num_attention_heads,) * len(self.down_block_types )
# down
A = []
A = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
A = output_channel
A = block_out_channels[i]
A = i == len(a__ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
A = FlaxCrossAttnDownBlockaD(
in_channels=a__ , out_channels=a__ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A = FlaxDownBlockaD(
in_channels=a__ , out_channels=a__ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(a__ )
A = down_blocks
# mid
A = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
A = []
A = list(reversed(a__ ) )
A = list(reversed(a__ ) )
A = list(reversed(a__ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
A = output_channel
A = reversed_block_out_channels[i]
A = reversed_block_out_channels[min(i + 1 , len(a__ ) - 1 )]
A = i == len(a__ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
A = FlaxCrossAttnUpBlockaD(
in_channels=a__ , out_channels=a__ , prev_output_channel=a__ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
A = FlaxUpBlockaD(
in_channels=a__ , out_channels=a__ , prev_output_channel=a__ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(a__ )
A = output_channel
A = up_blocks
# out
A = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
A = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self , a__ , a__ , a__ , a__=None , a__=None , a__ = True , a__ = False , ) -> Union[FlaxUNetaDConditionOutput, Tuple]:
# 1. time
if not isinstance(a__ , jnp.ndarray ):
A = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(a__ , jnp.ndarray ) and len(timesteps.shape ) == 0:
A = timesteps.astype(dtype=jnp.floataa )
A = jnp.expand_dims(a__ , 0 )
A = self.time_proj(a__ )
A = self.time_embedding(a__ )
# 2. pre-process
A = jnp.transpose(a__ , (0, 2, 3, 1) )
A = self.conv_in(a__ )
# 3. down
A = (sample,)
for down_block in self.down_blocks:
if isinstance(a__ , a__ ):
A , A = down_block(a__ , a__ , a__ , deterministic=not train )
else:
A , A = down_block(a__ , a__ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
A = ()
for down_block_res_sample, down_block_additional_residual in zip(
a__ , a__ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
A = new_down_block_res_samples
# 4. mid
A = self.mid_block(a__ , a__ , a__ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
A = down_block_res_samples[-(self.layers_per_block + 1) :]
A = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(a__ , a__ ):
A = up_block(
a__ , temb=a__ , encoder_hidden_states=a__ , res_hidden_states_tuple=a__ , deterministic=not train , )
else:
A = up_block(a__ , temb=a__ , res_hidden_states_tuple=a__ , deterministic=not train )
# 6. post-process
A = self.conv_norm_out(a__ )
A = nn.silu(a__ )
A = self.conv_out(a__ )
A = jnp.transpose(a__ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=a__ )
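# Rough usage sketch for the conditional UNet defined above (a minimal example; it assumes the
# upstream diffusers class name FlaxUNet2DConditionModel and its `init_weights` helper, since the
# identifiers in this sample are placeholders and not runnable as written):
#   import jax
#   unet = FlaxUNet2DConditionModel(sample_size=32, in_channels=4, out_channels=4)
#   params = unet.init_weights(jax.random.PRNGKey(0))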
| 641
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowercase : Optional[int] = False
@skip_mps
class _UpperCamelCase ( __snake_case , __snake_case , __snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase = StableDiffusionAttendAndExcitePipeline
lowerCAmelCase = False
lowerCAmelCase = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def _UpperCAmelCase ( cls ) -> List[Any]:
super().setUpClass()
torch.use_deterministic_algorithms(a__ )
@classmethod
def _UpperCAmelCase ( cls ) -> Tuple:
super().tearDownClass()
torch.use_deterministic_algorithms(a__ )
def _UpperCAmelCase ( self ) -> Dict:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a__ , )
A = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
A = CLIPTextModel(a__ )
A = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _UpperCAmelCase ( self , a__ , a__=0 ) -> Optional[Any]:
if str(a__ ).startswith("""mps""" ):
A = torch.manual_seed(a__ )
else:
A = torch.Generator(device=a__ ).manual_seed(a__ )
        A = {
"""prompt""": """a cat and a frog""",
"""token_indices""": [2, 5],
"""generator""": generator,
"""num_inference_steps""": 1,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
"""max_iter_to_alter""": 2,
"""thresholds""": {0: 0.7},
}
return inputs
def _UpperCAmelCase ( self ) -> Union[str, Any]:
A = """cpu"""
A = self.get_dummy_components()
A = self.pipeline_class(**a__ )
pipe.to(a__ )
pipe.set_progress_bar_config(disable=a__ )
A = self.get_dummy_inputs(a__ )
A = pipe(**a__ ).images
A = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 64, 64, 3) )
A = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
A = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(a__ , 1e-3 )
def _UpperCAmelCase ( self ) -> List[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def _UpperCAmelCase ( self ) -> Dict:
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _UpperCAmelCase ( self ) -> Any:
self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7e-4 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _UpperCAmelCase ( self ) -> str:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def _UpperCAmelCase ( self ) -> int:
super().test_save_load_local(expected_max_difference=5e-4 )
def _UpperCAmelCase ( self ) -> Optional[Any]:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@classmethod
def _UpperCAmelCase ( cls ) -> Tuple:
super().setUpClass()
torch.use_deterministic_algorithms(a__ )
@classmethod
def _UpperCAmelCase ( cls ) -> Dict:
super().tearDownClass()
torch.use_deterministic_algorithms(a__ )
def _UpperCAmelCase ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCAmelCase ( self ) -> int:
A = torch.manual_seed(51 )
A = StableDiffusionAttendAndExcitePipeline.from_pretrained(
"""CompVis/stable-diffusion-v1-4""" , safety_checker=a__ , torch_dtype=torch.floataa )
pipe.to("""cuda""" )
A = """a painting of an elephant with glasses"""
A = [5, 7]
A = pipe(
prompt=a__ , token_indices=a__ , guidance_scale=7.5 , generator=a__ , num_inference_steps=5 , max_iter_to_alter=5 , output_type="""numpy""" , ).images[0]
A = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy""" )
assert np.abs((expected_image - image).max() ) < 5e-1
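# Typical use of the pipeline exercised by these tests (a sketch mirroring the calls above):
#   pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#   image = pipe(prompt="a cat and a frog", token_indices=[2, 5]).images[0]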
| 641
| 1
|
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r'\s+')
def get_hash(example):
    """Compute the MD5 hash of the whitespace-normalized content (used for exact deduplication)."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Compute mean and max line length of the content."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}
def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in the content."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
'''simple docstring'''
if example["hash"] in uniques:
uniques.remove(example["""hash"""] )
return True
else:
return False
def is_autogenerated(example, scan_width=5):
    """Check whether the first `scan_width` lines mark the file as auto-generated."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Flag examples that look like configuration files or tests."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
for line in lines:
count_config += line.lower().count("""config""" )
count_test += line.lower().count("""test""" )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def has_no_keywords(example):
    """Check whether the content defines no functions, classes or loops."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check whether the content contains at most `minimum` assignment statements."""
    lines = example["content"].splitlines()
    counter = 0
for line in lines:
counter += line.lower().count("""=""" )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the content using the global tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
return results
def filter(example, uniques, args):
    """Filter dataset with heuristics; config/test-like files are dropped with a given probability."""
    if not check_uniques(example, uniques):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split='train')
print(F"""Time to load dataset: {time.time()-t_start:.2f}""")
# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(F"""Time to preprocess dataset: {time.time()-t_start:.2f}""")
# Deduplicate hashes
uniques = set(ds.unique('hash'))
frac = len(uniques) / len(ds)
print(F"""Fraction of duplicates: {1-frac:.2%}""")
# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args})
print(F"""Time to filter dataset: {time.time()-t_start:.2f}""")
print(F"""Size of filtered dataset: {len(ds_filter)}""")
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F"""Time to deduplicate dataset: {time.time()-t_start:.2f}""")
print(F"""Size of deduplicate dataset: {len(ds_filter)}""")
# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / 'duplicate_clusters.json', 'w') as f:
json.dump(duplicate_clusters, f)
data_dir = output_dir / 'data'
data_dir.mkdir(exist_ok=True)
t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / F"""file-{file_number+1:012}.json""")
    end_index = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F"""Time to save dataset: {time.time()-t_start:.2f}""")
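# Example invocation (a sketch; the script filename and dataset name are placeholders, and the
# flags correspond to the PreprocessingArguments fields referenced above):
#   python preprocessing.py --dataset_name <raw-code-dataset> --tokenizer_dir <tokenizer> --output_dir codeparrot-clean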
| 242
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'google/efficientnet-b7': 'https://huggingface.co/google/efficientnet-b7/resolve/main/config.json',
}
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = """efficientnet"""
def __init__( self : List[Any] , __a : int = 3 , __a : int = 600 , __a : float = 2.0 , __a : float = 3.1 , __a : int = 8 , __a : List[int] = [3, 3, 5, 3, 5, 5, 3] , __a : List[int] = [32, 16, 24, 40, 80, 112, 192] , __a : List[int] = [16, 24, 40, 80, 112, 192, 320] , __a : List[int] = [] , __a : List[int] = [1, 2, 2, 2, 1, 2, 1] , __a : List[int] = [1, 2, 2, 3, 3, 4, 1] , __a : List[int] = [1, 6, 6, 6, 6, 6, 6] , __a : float = 0.25 , __a : str = "swish" , __a : int = 2560 , __a : str = "mean" , __a : float = 0.02 , __a : float = 0.001 , __a : float = 0.99 , __a : float = 0.5 , __a : float = 0.2 , **__a : Optional[Any] , ):
'''simple docstring'''
super().__init__(**__a )
lowerCamelCase__: str = num_channels
lowerCamelCase__: Optional[Any] = image_size
lowerCamelCase__: str = width_coefficient
lowerCamelCase__: int = depth_coefficient
lowerCamelCase__: Optional[Any] = depth_divisor
lowerCamelCase__: Union[str, Any] = kernel_sizes
lowerCamelCase__: str = in_channels
lowerCamelCase__: int = out_channels
lowerCamelCase__: Union[str, Any] = depthwise_padding
lowerCamelCase__: List[str] = strides
lowerCamelCase__: Tuple = num_block_repeats
lowerCamelCase__: int = expand_ratios
lowerCamelCase__: List[str] = squeeze_expansion_ratio
lowerCamelCase__: Tuple = hidden_act
lowerCamelCase__: Optional[Any] = hidden_dim
lowerCamelCase__: List[Any] = pooling_type
lowerCamelCase__: Optional[int] = initializer_range
lowerCamelCase__: Any = batch_norm_eps
lowerCamelCase__: Union[str, Any] = batch_norm_momentum
lowerCamelCase__: List[str] = dropout_rate
lowerCamelCase__: Dict = drop_connect_rate
lowerCamelCase__: Dict = sum(__a ) * 4
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = version.parse("""1.11""" )
@property
def lowerCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase_ ( self : Any ):
'''simple docstring'''
return 1e-5
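# Minimal usage sketch (hypothetical values; upstream, the first class above is EfficientNetConfig
# and the second is its ONNX export config):
#   config = EfficientNetConfig(num_channels=3, image_size=600, hidden_dim=2560)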
| 242
| 1
|
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class _A ( unittest.TestCase):
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = 3
SCREAMING_SNAKE_CASE_ : str = 250
SCREAMING_SNAKE_CASE_ : Dict = ids_tensor((batch_size, length) , _SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Tuple = torch.ones((batch_size, length) , device=_SCREAMING_SNAKE_CASE , dtype=torch.float ) / length
return input_ids, scores
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self._get_tensors(5 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = StoppingCriteriaList(
[
MaxLengthCriteria(max_length=10 ),
MaxTimeCriteria(max_time=0.1 ),
] )
self.assertFalse(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_tensors(9 )
self.assertFalse(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self._get_tensors(10 )
self.assertTrue(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = MaxLengthCriteria(max_length=10 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_tensors(5 )
self.assertFalse(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_tensors(9 )
self.assertFalse(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self._get_tensors(10 )
self.assertTrue(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self._get_tensors(5 )
self.assertFalse(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Tuple = self._get_tensors(9 )
self.assertFalse(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = self._get_tensors(10 )
self.assertTrue(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : Optional[int] = StoppingCriteriaList([criteria] )
self.assertEqual(criteria_list.max_length , 10 )
def UpperCAmelCase ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = self._get_tensors(5 )
SCREAMING_SNAKE_CASE_ : Dict = MaxTimeCriteria(max_time=0.1 )
self.assertFalse(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ : List[Any] = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 )
self.assertTrue(criteria(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
def UpperCAmelCase ( self ):
"""simple docstring"""
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 )
with self.assertWarns(_SCREAMING_SNAKE_CASE ):
validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 )
SCREAMING_SNAKE_CASE_ : Dict = validate_stopping_criteria(StoppingCriteriaList() , 11 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 1 )
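# In practice these criteria are passed to `generate`, e.g. (a sketch):
#   model.generate(input_ids, stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=20)]))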
| 511
|
from typing import Dict, List, Optional
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase : Any = logging.get_logger(__name__)
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'nielsr/canine-s': 20_48,
}
# Unicode defines 1,114,112 total “codepoints”
UNICODE_VOCAB_SIZE = 1_11_41_12
# Below: Constants defining canonical codepoints for special, pseudo-characters.
# Copied from https://github.com/google-research/language/blob/master/language/canine/special_codepoints.py
PAD = 0
CLS = 0XE_0_0_0
SEP = 0XE_0_0_1
BOS = 0XE_0_0_2
MASK = 0XE_0_0_3
RESERVED = 0XE_0_0_4
# Maps special codepoints to human-readable names.
SPECIAL_CODEPOINTS: Dict[int, str] = {
# Special symbols are represented using codepoints values that are valid,
# but designated as "Private Use", meaning that they will never be assigned
# characters by the Unicode Consortium, and are thus safe for use here.
#
# NOTE: Do *NOT* add any sort of [UNK_CHAR] here. They are explicitly
# excluded and should fail with a hard error.
CLS: "[CLS]",
SEP: "[SEP]",
BOS: "[BOS]",
MASK: "[MASK]",
PAD: "[PAD]",
RESERVED: "[RESERVED]",
}
# Maps special codepoint human-readable names to their codepoint values.
lowerCAmelCase : Dict[str, int] = {name: codepoint for codepoint, name in SPECIAL_CODEPOINTS.items()}
class _A ( __magic_name__):
SCREAMING_SNAKE_CASE : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=chr(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2048 , **_SCREAMING_SNAKE_CASE , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else bos_token
SCREAMING_SNAKE_CASE_ : Dict = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else eos_token
SCREAMING_SNAKE_CASE_ : Optional[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else sep_token
SCREAMING_SNAKE_CASE_ : str = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cls_token
SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : str = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , model_max_length=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# Creates a mapping for looking up the IDs of special symbols.
SCREAMING_SNAKE_CASE_ : Dict[str, int] = {}
for codepoint, name in SPECIAL_CODEPOINTS.items():
SCREAMING_SNAKE_CASE_ : str = codepoint
# Creates a mapping for looking up the string forms of special symbol IDs.
SCREAMING_SNAKE_CASE_ : Dict[int, str] = {
codepoint: name for name, codepoint in self._special_codepoints.items()
}
SCREAMING_SNAKE_CASE_ : int = UNICODE_VOCAB_SIZE
SCREAMING_SNAKE_CASE_ : Optional[int] = len(self._special_codepoints )
@property
def UpperCAmelCase ( self ):
"""simple docstring"""
return self._unicode_vocab_size
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return list(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
return ord(_SCREAMING_SNAKE_CASE )
except TypeError:
raise ValueError(f"invalid token: '{token}'" )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
try:
if index in SPECIAL_CODEPOINTS:
return SPECIAL_CODEPOINTS[index]
return chr(_SCREAMING_SNAKE_CASE )
except TypeError:
raise ValueError(f"invalid id: {index}" )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return "".join(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : int = cls + token_ids_a + sep
if token_ids_a is not None:
result += token_ids_a + sep
return result
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ : Optional[Any] = [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
if token_ids_a is not None:
result += ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return result
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[Any] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : int = len(cls + token_ids_a + sep ) * [0]
if token_ids_a is not None:
result += len(token_ids_a + sep ) * [1]
return result
def UpperCAmelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
"""simple docstring"""
return ()
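# Rough usage sketch (assumes the upstream class name CanineTokenizer; CANINE tokenizes text
# directly into Unicode code points, so there is no vocabulary file to save):
#   tokenizer = CanineTokenizer()
#   ids = tokenizer("life is like a box of chocolates")["input_ids"]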
| 511
| 1
|
"""simple docstring"""
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPTaConfig, TaConfig, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : List[str] = """new-model"""
if is_tf_available():
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Optional[int] = NewModelConfig
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self :Any ):
__lowerCamelCase : Tuple ='''bert-base-cased'''
__lowerCamelCase : str =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : List[str] =TFAutoModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : Optional[int] ='''bert-base-cased'''
__lowerCamelCase : str =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : List[Any] =TFAutoModelForPreTraining.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def __lowercase ( self :Dict ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Tuple =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : List[Any] =TFAutoModelForCausalLM.from_pretrained(__lowercase )
__lowerCamelCase , __lowerCamelCase : Optional[Any] =TFAutoModelForCausalLM.from_pretrained(__lowercase , output_loading_info=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def __lowercase ( self :Any ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[Any] =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : Union[str, Any] =TFAutoModelWithLMHead.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def __lowercase ( self :Tuple ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : List[str] =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : Dict =TFAutoModelForMaskedLM.from_pretrained(__lowercase )
__lowerCamelCase , __lowerCamelCase : Optional[Any] =TFAutoModelForMaskedLM.from_pretrained(__lowercase , output_loading_info=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def __lowercase ( self :List[str] ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase : Dict =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : Tuple =TFAutoModelForSeqaSeqLM.from_pretrained(__lowercase )
__lowerCamelCase , __lowerCamelCase : int =TFAutoModelForSeqaSeqLM.from_pretrained(__lowercase , output_loading_info=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def __lowercase ( self :Optional[Any] ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : Optional[int] =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : Optional[Any] =TFAutoModelForSequenceClassification.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
def __lowercase ( self :int ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCamelCase : Tuple =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : Union[str, Any] =TFAutoModelForQuestionAnswering.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
@slow
@require_tensorflow_probability
def __lowercase ( self :Dict ):
for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
__lowerCamelCase : Any =AutoConfig.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : Tuple =TFAutoModelForTableQuestionAnswering.from_pretrained(__lowercase )
__lowerCamelCase , __lowerCamelCase : Optional[int] =TFAutoModelForTableQuestionAnswering.from_pretrained(
__lowercase , output_loading_info=__lowercase )
self.assertIsNotNone(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Optional[Any] =TFAutoModelWithLMHead.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_4410 )
def __lowercase ( self :List[str] ):
__lowerCamelCase : str =TFAutoModelWithLMHead.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
self.assertEqual(model.num_parameters() , 1_4410 )
self.assertEqual(model.num_parameters(only_trainable=__lowercase ) , 1_4410 )
def __lowercase ( self :Tuple ):
# For the auto model mapping, FunnelConfig has two models: FunnelModel and FunnelBaseModel
__lowerCamelCase : Optional[int] =TFAutoModel.from_pretrained('''sgugger/funnel-random-tiny''' )
self.assertIsInstance(__lowercase , __lowercase )
__lowerCamelCase : int =copy.deepcopy(model.config )
__lowerCamelCase : Union[str, Any] =['''FunnelBaseModel''']
__lowerCamelCase : Tuple =TFAutoModel.from_config(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowercase )
__lowerCamelCase : Optional[Any] =TFAutoModel.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
def __lowercase ( self :Union[str, Any] ):
try:
AutoConfig.register('''new-model''' , __lowercase )
__lowerCamelCase : Optional[Any] =[
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSequenceClassification,
TFAutoModelForTokenClassification,
]
for auto_class in auto_classes:
with self.subTest(auto_class.__name__ ):
# Wrong config class will raise an error
with self.assertRaises(__lowercase ):
auto_class.register(__lowercase , __lowercase )
auto_class.register(__lowercase , __lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__lowercase ):
auto_class.register(__lowercase , __lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
__lowerCamelCase : Union[str, Any] =BertModelTester(self ).get_config()
__lowerCamelCase : Optional[int] =NewModelConfig(**tiny_config.to_dict() )
__lowerCamelCase : Tuple =auto_class.from_config(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
model.save_pretrained(__lowercase )
__lowerCamelCase : int =auto_class.from_pretrained(__lowercase )
self.assertIsInstance(__lowercase , __lowercase )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
for mapping in (
TF_MODEL_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
):
if NewModelConfig in mapping._extra_content:
del mapping._extra_content[NewModelConfig]
def __lowercase ( self :str ):
with self.assertRaisesRegex(
__lowercase , '''bert-base is not a local folder and is not a valid model identifier''' ):
__lowerCamelCase : int =TFAutoModel.from_pretrained('''bert-base''' )
def __lowercase ( self :List[str] ):
with self.assertRaisesRegex(
__lowercase , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
__lowerCamelCase : List[str] =TFAutoModel.from_pretrained(__lowercase , revision='''aaaaaa''' )
def __lowercase ( self :Any ):
with self.assertRaisesRegex(
__lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin''' , ):
__lowerCamelCase : List[str] =TFAutoModel.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowercase ( self :Union[str, Any] ):
with self.assertRaisesRegex(__lowercase , '''Use `from_pt=True` to load this model''' ):
__lowerCamelCase : Any =TFAutoModel.from_pretrained('''hf-internal-testing/tiny-bert-pt-only''' )
def __lowercase ( self :Tuple ):
# Make sure we have cached the model.
__lowerCamelCase : Optional[Any] =TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
with RequestCounter() as counter:
__lowerCamelCase : Dict =TFAutoModel.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
# With a sharded checkpoint
__lowerCamelCase : Tuple =TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
with RequestCounter() as counter:
__lowerCamelCase : str =TFAutoModel.from_pretrained('''ArthurZ/tiny-random-bert-sharded''' )
self.assertEqual(counter.get_request_count , 0 )
self.assertEqual(counter.head_request_count , 1 )
self.assertEqual(counter.other_request_count , 0 )
| 363
|
"""simple docstring"""
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def __lowercase ( self :Tuple ):
__lowerCamelCase : Any =Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
__lowerCamelCase : Any =Vector()
def __lowercase ( self :Dict ):
__lowerCamelCase : Tuple =Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__lowercase ) , '''(0,0,0,0,0,1)''' )
def __lowercase ( self :Dict ):
__lowerCamelCase : int =Vector([1, 2, 3, 4] )
self.assertEqual(len(__lowercase ) , 4 )
def __lowercase ( self :Dict ):
__lowerCamelCase : Optional[Any] =Vector([1, 2] )
__lowerCamelCase : Dict =Vector([1, 2, 3, 4, 5] )
__lowerCamelCase : List[Any] =Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
__lowerCamelCase : int =Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Tuple =Vector([1, 2, 3] )
__lowerCamelCase : Any =Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def __lowercase ( self :str ):
__lowerCamelCase : Union[str, Any] =Vector([1, 2, 3] )
__lowerCamelCase : int =Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def __lowercase ( self :int ):
__lowerCamelCase : List[Any] =Vector([1, 2, 3] )
__lowerCamelCase : List[Any] =Vector([2, -1, 4] ) # for test of dot product
__lowerCamelCase : Any =Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , '''(3.0,6.0,9.0)''' )
self.assertEqual((a * b) , 0 )
def __lowercase ( self :List[Any] ):
self.assertEqual(str(zero_vector(10 ) ).count('''0''' ) , 10 )
def __lowercase ( self :Union[str, Any] ):
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , '''(0,1,0)''' )
def __lowercase ( self :List[Any] ):
__lowerCamelCase : Any =Vector([1, 2, 3] )
__lowerCamelCase : Optional[int] =Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __lowercase , __lowercase ) ) , '''(3,4,7)''' )
def __lowercase ( self :Dict ):
__lowerCamelCase : List[Any] =Vector([1, 0, 0, 0, 0, 0] )
__lowerCamelCase : Optional[int] =x.copy()
self.assertEqual(str(__lowercase ) , str(__lowercase ) )
def __lowercase ( self :int ):
__lowerCamelCase : str =Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__lowercase ) , '''(0,1,0)''' )
def __lowercase ( self :int ):
__lowerCamelCase : Any =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual('''|1,2,3|\n|2,4,5|\n|6,7,8|\n''' , str(__lowercase ) )
def __lowercase ( self :int ):
__lowerCamelCase : Tuple =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : List[Any] =[[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__lowercase , __lowercase ) )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : Optional[Any] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : Tuple =[[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__lowercase , __lowercase ) )
def __lowercase ( self :Tuple ):
__lowerCamelCase : Tuple =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def __lowercase ( self :int ):
__lowerCamelCase : Union[str, Any] =Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
__lowerCamelCase : Tuple =Vector([1, 2, 3] )
self.assertEqual('''(14,32,50)''' , str(a * x ) )
self.assertEqual('''|2,4,6|\n|8,10,12|\n|14,16,18|\n''' , str(a * 2 ) )
def __lowercase ( self :Optional[Any] ):
__lowerCamelCase : Optional[int] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual('''|1,2,5|\n|2,4,5|\n|6,7,8|\n''' , str(__lowercase ) )
def __lowercase ( self :str ):
__lowerCamelCase : str =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def __lowercase ( self :Optional[int] ):
__lowerCamelCase : List[str] =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : List[str] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|2,4,10|\n|4,8,10|\n|12,14,18|\n''' , str(a + b ) )
def __lowercase ( self :Union[str, Any] ):
__lowerCamelCase : int =Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
__lowerCamelCase : Optional[int] =Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual('''|0,0,-4|\n|0,0,0|\n|0,0,-2|\n''' , str(a - b ) )
def __lowercase ( self :Any ):
self.assertEqual(
'''|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n''' , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 363
| 1
|
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
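# For instance, a two-GPU mixed-precision run of this script could be launched with
# (the filename below is a placeholder):
#   accelerate launch --num_processes 2 this_script.py --mixed_precision fp16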
__lowercase : str = 16
__lowercase : Any = 32
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 ):
__a : Optional[int] = AutoTokenizer.from_pretrained('bert-base-cased' )
__a : Optional[Any] = load_dataset('glue' , 'mrpc' )
def tokenize_function(_SCREAMING_SNAKE_CASE : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__a : Dict = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
__a : List[str] = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a : Optional[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_SCREAMING_SNAKE_CASE : Tuple ):
# On TPU it's best to pad everything to the same length or training will be very slow.
__a : Tuple = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
__a : Any = 16
elif accelerator.mixed_precision != "no":
__a : Optional[Any] = 8
else:
__a : List[str] = None
return tokenizer.pad(
_SCREAMING_SNAKE_CASE , padding='longest' , max_length=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
# Instantiate dataloaders.
__a : List[str] = DataLoader(
tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
__a : List[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowercase : str = mocked_dataloaders # noqa: F811
def lowerCamelCase (_SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Any ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS' , _SCREAMING_SNAKE_CASE ) == "1":
__a : List[Any] = 2
# Initialize accelerator
__a : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Any = config['lr']
__a : List[Any] = int(config['num_epochs'] )
__a : Any = int(config['seed'] )
__a : int = int(config['batch_size'] )
__a : Optional[int] = evaluate.load('glue' , 'mrpc' )
# If the batch size is too big we use gradient accumulation
__a : str = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
__a : Optional[Any] = batch_size // MAX_GPU_BATCH_SIZE
__a : Dict = MAX_GPU_BATCH_SIZE
set_seed(_SCREAMING_SNAKE_CASE )
__a , __a : Optional[Any] = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=_SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
__a : List[str] = model.to(accelerator.device )
# Instantiate optimizer
__a : List[str] = AdamW(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
# Instantiate scheduler
__a : Any = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a : Dict = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(_SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
__a : List[Any] = model(**_SCREAMING_SNAKE_CASE )
__a : List[str] = outputs.loss
__a : List[str] = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
__a : Optional[int] = 0
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
__a : Tuple = model(**_SCREAMING_SNAKE_CASE )
__a : Optional[int] = outputs.logits.argmax(dim=-1 )
__a , __a : Optional[int] = accelerator.gather((predictions, batch['labels']) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_SCREAMING_SNAKE_CASE ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
__a : List[str] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
__a : Tuple = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE , )
__a : Any = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"""epoch {epoch}:""" , _SCREAMING_SNAKE_CASE )
def lowerCamelCase ():
__a : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
'--mixed_precision' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
__a : Optional[Any] = parser.parse_args()
__a : List[Any] = {'lr': 2e-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 476
|
'''simple docstring'''
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowercase : Tuple = 16
__lowercase : int = 32
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Optional[Any] ):
return int(x / 2**20 )
class __UpperCamelCase :
def __enter__( self ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated() # reset the peak gauge to zero
__a : Union[str, Any] = torch.cuda.memory_allocated()
return self
def __exit__( self , *__a ):
'''simple docstring'''
gc.collect()
torch.cuda.empty_cache()
__a : List[str] = torch.cuda.memory_allocated()
__a : Union[str, Any] = torch.cuda.max_memory_allocated()
__a : int = bamb(self.end - self.begin )
__a : Tuple = bamb(self.peak - self.begin )
# print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Accelerator , _SCREAMING_SNAKE_CASE : int = 16 , _SCREAMING_SNAKE_CASE : str = "bert-base-cased" , _SCREAMING_SNAKE_CASE : int = 320 , _SCREAMING_SNAKE_CASE : int = 160 , ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained(_SCREAMING_SNAKE_CASE )
__a : int = load_dataset(
'glue' , 'mrpc' , split={'train': F"""train[:{n_train}]""", 'validation': F"""validation[:{n_val}]"""} )
def tokenize_function(_SCREAMING_SNAKE_CASE : List[str] ):
# max_length=None => use the model max length (it's actually the default)
__a : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
__a : Dict = datasets.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=_SCREAMING_SNAKE_CASE )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
__a : List[Any] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_SCREAMING_SNAKE_CASE : int ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
__a : Any = DataLoader(
tokenized_datasets['train'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
__a : List[str] = DataLoader(
tokenized_datasets['validation'] , shuffle=_SCREAMING_SNAKE_CASE , collate_fn=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
def lowerCamelCase (_SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any] ):
# Initialize accelerator
__a : Optional[int] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
__a : Tuple = config['lr']
__a : List[Any] = int(config['num_epochs'] )
__a : List[Any] = int(config['seed'] )
__a : List[str] = int(config['batch_size'] )
__a : Optional[Any] = args.model_name_or_path
set_seed(_SCREAMING_SNAKE_CASE )
__a , __a : Dict = get_dataloaders(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
__a : Tuple = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE )
# Instantiate optimizer
__a : Optional[int] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
__a : str = optimizer_cls(params=model.parameters() , lr=_SCREAMING_SNAKE_CASE )
if accelerator.state.deepspeed_plugin is not None:
__a : int = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
__a : Optional[Any] = 1
__a : List[Any] = (len(_SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps
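# The line above counts total optimizer update steps: batches per epoch times epochs, divided by the gradient accumulation factor.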
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
__a : int = get_linear_schedule_with_warmup(
optimizer=_SCREAMING_SNAKE_CASE , num_warmup_steps=0 , num_training_steps=_SCREAMING_SNAKE_CASE , )
else:
__a : Dict = DummyScheduler(_SCREAMING_SNAKE_CASE , total_num_steps=_SCREAMING_SNAKE_CASE , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
__a , __a , __a , __a , __a : str = accelerator.prepare(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# We need to keep track of how many total steps we have iterated over
overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
starting_epoch = 0
# Now we train the model
train_total_peak_memory = {}
for epoch in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__a : int = model(**_SCREAMING_SNAKE_CASE )
__a : str = outputs.loss
__a : Dict = loss / gradient_accumulation_steps
accelerator.backward(_SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print('Memory before entering the train : {}'.format(bamb(tracemalloc.begin ) ) )
accelerator.print('Memory consumed at the end of the train (end-begin): {}'.format(tracemalloc.used ) )
accelerator.print('Peak Memory consumed during the train (max-begin): {}'.format(tracemalloc.peaked ) )
accelerator.print(
'Total Peak Memory consumed during the train (max): {}'.format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
train_total_peak_memory[F"""epoch-{epoch}"""] = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[F"""epoch-{epoch}"""] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , 'peak_memory_utilization.json' ) , 'w' ) as f:
json.dump(train_total_peak_memory , f )
def main ():
parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=_SCREAMING_SNAKE_CASE , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=_SCREAMING_SNAKE_CASE , )
parser.add_argument(
'--output_dir' , type=_SCREAMING_SNAKE_CASE , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--peak_memory_upper_bound' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value.' , )
parser.add_argument(
'--n_train' , type=_SCREAMING_SNAKE_CASE , default=320 , help='Number of training examples to use.' , )
parser.add_argument(
'--n_val' , type=_SCREAMING_SNAKE_CASE , default=160 , help='Number of validation examples to use.' , )
parser.add_argument(
'--num_epochs' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of train epochs.' , )
args = parser.parse_args()
__a : str = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
training_function(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 476
| 1
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3
class FailedTestError( Exception ):
"""simple docstring"""
pass
def gen( shards ):
for shard in shards:
for i in range(NUM_ITEMS_PER_SHARD ):
yield {"i": i, "shard": shard}
def main():
rank = int(os.environ['''RANK'''] )
world_size = int(os.environ['''WORLD_SIZE'''] )
parser = ArgumentParser()
parser.add_argument('''--streaming''' , type=bool )
parser.add_argument('''--local_rank''' , type=int )
parser.add_argument('''--num_workers''' , type=int , default=0 )
args = parser.parse_args()
streaming = args.streaming
num_workers = args.num_workers
gen_kwargs = {'''shards''': [f'''shard_{shard_idx}''' for shard_idx in range(NUM_SHARDS )]}
ds = IterableDataset.from_generator(gen , gen_kwargs=gen_kwargs )
if not streaming:
ds = Dataset.from_list(list(ds ) )
ds = split_dataset_by_node(ds , rank=rank , world_size=world_size )
dataloader = torch.utils.data.DataLoader(ds , num_workers=num_workers )
full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
expected_local_size = full_size // world_size
expected_local_size += int(rank < (full_size % world_size) )
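# split_dataset_by_node gives the first (full_size % world_size) ranks one extra example each, hence the adjustment above.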
local_size = sum(1 for _ in dataloader )
if local_size != expected_local_size:
raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' )
if __name__ == "__main__":
main()
| 708
|
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
_UpperCamelCase : Optional[int] =version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
_UpperCamelCase : Any ="\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
_UpperCamelCase : Optional[Any] ="\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
_UpperCamelCase : List[str] ="\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
"""simple docstring"""
def _lowerCamelCase ( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'''] , reference_urls=[
'''https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score''',
'''https://en.wikipedia.org/wiki/METEOR''',
] , )
def _lowerCamelCase ( self , _snake_case ):
"""simple docstring"""
import nltk
nltk.download('''wordnet''' )
if NLTK_VERSION >= version.Version('''3.6.5''' ):
nltk.download('''punkt''' )
if NLTK_VERSION >= version.Version('''3.6.6''' ):
nltk.download('''omw-1.4''' )
def _lowerCamelCase ( self , _snake_case , _snake_case , _snake_case=0.9 , _snake_case=3 , _snake_case=0.5 ):
"""simple docstring"""
if NLTK_VERSION >= version.Version('''3.6.5''' ):
__lowerCamelCase = [
meteor_score.single_meteor_score(
word_tokenize(_snake_case ) , word_tokenize(_snake_case ) , alpha=_snake_case , beta=_snake_case , gamma=_snake_case )
for ref, pred in zip(_snake_case , _snake_case )
]
else:
__lowerCamelCase = [
meteor_score.single_meteor_score(_snake_case , _snake_case , alpha=_snake_case , beta=_snake_case , gamma=_snake_case )
for ref, pred in zip(_snake_case , _snake_case )
]
return {"meteor": np.mean(_snake_case )}
| 575
| 0
|
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__UpperCAmelCase = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__UpperCAmelCase = f"https://www.google.com/search?q={query}&num=100"
__UpperCAmelCase = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
__UpperCAmelCase = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
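# Fallback: some result layouts wrap the target in a redirect link, so recover the destination URL from the query string instead.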
__UpperCAmelCase = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
| 642
|
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
"""simple docstring"""
data: int
left: TreeNode | None = None
right: TreeNode | None = None
A : Optional[Any] = namedtuple('''CoinsDistribResult''', '''moves excess''')
def lowerCAmelCase__ ( lowerCamelCase : TreeNode | None ):
if root is None:
return 0
# Validation
def count_nodes(node : TreeNode | None ) -> int:
if node is None:
return 0
return count_nodes(node.left ) + count_nodes(node.right ) + 1
def count_coins(node : TreeNode | None ) -> int:
if node is None:
return 0
return count_coins(node.left ) + count_coins(node.right ) + node.data
if count_nodes(lowerCamelCase ) != count_coins(lowerCamelCase ):
raise ValueError('The nodes number should be same as the number of coins' )
# Main calculation
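# Each move transfers one coin along one edge. For every subtree, its surplus of coins
# over nodes must cross the edge to its parent, so the total number of moves is the sum
# of the absolute surpluses accumulated while walking the tree below.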
def get_distrib(node : TreeNode | None ) -> CoinsDistribResult:
if node is None:
return CoinsDistribResult(0 ,1 )
left_distrib_moves , left_distrib_excess = get_distrib(node.left )
right_distrib_moves , right_distrib_excess = get_distrib(node.right )
coins_to_left = 1 - left_distrib_excess
coins_to_right = 1 - right_distrib_excess
distrib_moves = (
left_distrib_moves
+ right_distrib_moves
+ abs(coins_to_left )
+ abs(coins_to_right )
)
distrib_excess = node.data - coins_to_left - coins_to_right
return CoinsDistribResult(distrib_moves , distrib_excess )
return get_distrib(lowerCamelCase )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
_lowerCAmelCase = logging.get_logger(__name__)
class lowerCAmelCase_ ( __lowercase ):
UpperCAmelCase = ["input_features", "is_longer"]
def __init__( self : Tuple , _A : Tuple=64 , _A : Any=4_8000 , _A : List[str]=480 , _A : Optional[int]=10 , _A : List[Any]=1024 , _A : str=0.0 , _A : Union[str, Any]=False , _A : float = 0 , _A : float = 1_4000 , _A : int = None , _A : str = "fusion" , _A : str = "repeatpad" , **_A : Union[str, Any] , ):
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
_UpperCamelCase = top_db
_UpperCamelCase = truncation
_UpperCamelCase = padding
_UpperCamelCase = fft_window_size
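# The next line computes the number of non-redundant frequency bins of a real-valued FFT with this window size.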
_UpperCamelCase = (fft_window_size >> 1) + 1
_UpperCamelCase = hop_length
_UpperCamelCase = max_length_s
_UpperCamelCase = max_length_s * sampling_rate
_UpperCamelCase = sampling_rate
_UpperCamelCase = frequency_min
_UpperCamelCase = frequency_max
_UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm=_A , mel_scale='''htk''' , )
_UpperCamelCase = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=_A , min_frequency=_A , max_frequency=_A , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
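# Two mel filter banks are kept: the HTK-scaled bank feeds the "fusion" feature path, while the Slaney-normalized bank is used for the plain (non-fusion) features.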
def UpperCamelCase_ ( self : int ):
_UpperCamelCase = copy.deepcopy(self.__dict__ )
_UpperCamelCase = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def UpperCamelCase_ ( self : List[Any] , _A : np.array , _A : Optional[np.array] = None ):
_UpperCamelCase = spectrogram(
_A , window_function(self.fft_window_size , '''hann''' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=_A , log_mel='''dB''' , )
return log_mel_spectrogram.T
def UpperCamelCase_ ( self : Union[str, Any] , _A : List[Any] , _A : Optional[Any] , _A : List[Any] ):
_UpperCamelCase = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
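# Split the valid chunk start offsets into three ranges (front, middle, back of the spectrogram) so one crop is drawn from each region.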
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCamelCase = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
_UpperCamelCase = [0]
# randomly choose index for each part
_UpperCamelCase = np.random.choice(ranges[0] )
_UpperCamelCase = np.random.choice(ranges[1] )
_UpperCamelCase = np.random.choice(ranges[2] )
_UpperCamelCase = mel[idx_front : idx_front + chunk_frames, :]
_UpperCamelCase = mel[idx_middle : idx_middle + chunk_frames, :]
_UpperCamelCase = mel[idx_back : idx_back + chunk_frames, :]
_UpperCamelCase = torch.tensor(mel[None, None, :] )
_UpperCamelCase = torch.nn.functional.interpolate(
_A , size=[chunk_frames, 64] , mode='''bilinear''' , align_corners=_A )
_UpperCamelCase = mel_shrink[0][0].numpy()
_UpperCamelCase = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def UpperCamelCase_ ( self : Optional[int] , _A : np.array , _A : int , _A : str , _A : int ):
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
_UpperCamelCase = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
_UpperCamelCase = len(_A ) - max_length
_UpperCamelCase = np.random.randint(0 , overflow + 1 )
_UpperCamelCase = waveform[idx : idx + max_length]
_UpperCamelCase = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
_UpperCamelCase = self._np_extract_fbank_features(_A , self.mel_filters )
_UpperCamelCase = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
_UpperCamelCase = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
_UpperCamelCase = np.stack([mel, mel, mel, mel] , axis=0 )
_UpperCamelCase = False
else:
_UpperCamelCase = self._random_mel_fusion(_A , _A , _A )
_UpperCamelCase = True
else:
raise NotImplementedError(F"""data_truncating {truncation} not implemented""" )
else:
_UpperCamelCase = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
_UpperCamelCase = int(max_length / len(_A ) )
_UpperCamelCase = np.stack(np.tile(_A , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
_UpperCamelCase = int(max_length / len(_A ) )
_UpperCamelCase = np.stack(np.tile(_A , _A ) )
_UpperCamelCase = np.pad(_A , (0, max_length - waveform.shape[0]) , mode='''constant''' , constant_values=0 )
if truncation == "fusion":
_UpperCamelCase = self._np_extract_fbank_features(_A , self.mel_filters )
_UpperCamelCase = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
_UpperCamelCase = self._np_extract_fbank_features(_A , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : str , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : str = None , _A : Optional[str] = None , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , **_A : str , ):
_UpperCamelCase = truncation if truncation is not None else self.truncation
_UpperCamelCase = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
F""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
F""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
_UpperCamelCase = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" )
_UpperCamelCase = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCamelCase = [np.asarray(_A , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
_UpperCamelCase = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCamelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCamelCase = [np.asarray(_A )]
# convert to mel spectrogram, truncate and pad if needed.
_UpperCamelCase = [
self._get_input_mel(_A , max_length if max_length else self.nb_max_samples , _A , _A )
for waveform in raw_speech
]
_UpperCamelCase = []
_UpperCamelCase = []
for mel, longer in padded_inputs:
input_mel.append(_A )
is_longer.append(_A )
if truncation == "fusion" and sum(_A ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
_UpperCamelCase = np.random.randint(0 , len(_A ) )
_UpperCamelCase = True
if isinstance(input_mel[0] , _A ):
_UpperCamelCase = [np.asarray(_A , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
_UpperCamelCase = [[longer] for longer in is_longer]
_UpperCamelCase = {'''input_features''': input_mel, '''is_longer''': is_longer}
_UpperCamelCase = BatchFeature(_A )
if return_tensors is not None:
_UpperCamelCase = input_features.convert_to_tensors(_A )
return input_features
| 71
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST",
"UniSpeechForCTC",
"UniSpeechForPreTraining",
"UniSpeechForSequenceClassification",
"UniSpeechModel",
"UniSpeechPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 71
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''roberta''', choices=['''roberta''', '''gpt2'''])
parser.add_argument('''--model_name''', default='''roberta-large''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_roberta_048131723.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
_UpperCAmelCase : Optional[int] = parser.parse_args()
if args.model_type == "roberta":
_UpperCAmelCase : Tuple = RobertaForMaskedLM.from_pretrained(args.model_name)
_UpperCAmelCase : Optional[Any] = '''roberta'''
elif args.model_type == "gpt2":
_UpperCAmelCase : List[Any] = GPTaLMHeadModel.from_pretrained(args.model_name)
_UpperCAmelCase : List[Any] = '''transformer'''
_UpperCAmelCase : List[Any] = model.state_dict()
_UpperCAmelCase : str = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
_UpperCAmelCase : Tuple = state_dict[F"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
_UpperCAmelCase : Any = F"""{prefix}.embeddings.{w}.weight"""
_UpperCAmelCase : Union[str, Any] = state_dict[param_name]
for w in ["weight", "bias"]:
_UpperCAmelCase : int = F"""{prefix}.embeddings.LayerNorm.{w}"""
_UpperCAmelCase : Tuple = state_dict[param_name]
# Transformer Blocks #
_UpperCAmelCase : Dict = 0
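# Six teacher layers (indices 0, 2, 4, 7, 9, 11) are copied to initialize the distilled student.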
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Any = state_dict[
F"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
_UpperCAmelCase : List[str] = state_dict[F"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[int] = state_dict[
F"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
_UpperCAmelCase : Optional[Any] = state_dict[F"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
_UpperCAmelCase : str = state_dict[F"""lm_head.dense.{w}"""]
_UpperCAmelCase : List[str] = state_dict[F"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
_UpperCAmelCase : Optional[int] = state_dict[F"""{prefix}.ln_f.{w}"""]
_UpperCAmelCase : List[Any] = state_dict['''lm_head.weight''']
print(F"""N layers selected for distillation: {std_idx}""")
print(F"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(F"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 72
|
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
lowerCAmelCase_ = None
try:
import msvcrt
except ImportError:
lowerCAmelCase_ = None
try:
import fcntl
except ImportError:
lowerCAmelCase_ = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
lowerCAmelCase_ = OSError
# Data
# ------------------------------------------------
lowerCAmelCase_ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
lowerCAmelCase_ = '3.0.12'
_logger = None
def A__ ( ):
'''simple docstring'''
global _logger
_logger = _logger or logging.getLogger(__name__)
return _logger
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> int:
'''simple docstring'''
UpperCamelCase : Tuple = lock_file
return None
def __str__( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = f'''The file lock \'{self.lock_file}\' could not be acquired.'''
return temp
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCamelCase ) -> int:
'''simple docstring'''
UpperCamelCase : Optional[int] = lock
return None
def __enter__( self ) -> Optional[int]:
'''simple docstring'''
return self.lock
def __exit__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> List[Any]:
'''simple docstring'''
self.lock.release()
return None
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=-1 , lowerCamelCase=None ) -> List[str]:
'''simple docstring'''
UpperCamelCase : Tuple = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
UpperCamelCase : int = self.hash_filename_if_too_long(lowerCamelCase , lowerCamelCase )
# The path to the lock file.
UpperCamelCase : str = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
UpperCamelCase : Dict = None
# The default timeout value.
UpperCamelCase : Dict = timeout
# We use this lock primarily for the lock counter.
UpperCamelCase : Any = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
UpperCamelCase : Any = 0
return None
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
return self._lock_file
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase ) -> List[str]:
'''simple docstring'''
UpperCamelCase : Optional[Any] = float(lowerCamelCase )
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
raise NotImplementedError()
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase=None , lowerCamelCase=0.05 ) -> Union[str, Any]:
'''simple docstring'''
if timeout is None:
UpperCamelCase : Union[str, Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
UpperCamelCase : str = id(self )
UpperCamelCase : Union[str, Any] = self._lock_file
UpperCamelCase : Union[str, Any] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f'''Attempting to acquire lock {lock_id} on {lock_filename}''' )
self._acquire()
if self.is_locked:
logger().debug(f'''Lock {lock_id} acquired on {lock_filename}''' )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f'''Timeout on acquiring lock {lock_id} on {lock_filename}''' )
raise Timeout(self._lock_file )
else:
logger().debug(
f'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''' )
time.sleep(lowerCamelCase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
UpperCamelCase : Tuple = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase=False ) -> Optional[int]:
'''simple docstring'''
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
UpperCamelCase : Optional[Any] = id(self )
UpperCamelCase : Tuple = self._lock_file
logger().debug(f'''Attempting to release lock {lock_id} on {lock_filename}''' )
self._release()
UpperCamelCase : List[str] = 0
logger().debug(f'''Lock {lock_id} released on {lock_filename}''' )
return None
def __enter__( self ) -> List[Any]:
'''simple docstring'''
self.acquire()
return self
def __exit__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ) -> Optional[Any]:
'''simple docstring'''
self.release()
return None
def __del__( self ) -> List[str]:
'''simple docstring'''
self.release(force=lowerCamelCase )
return None
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase ) -> str:
'''simple docstring'''
UpperCamelCase : Tuple = os.path.basename(lowerCamelCase )
if len(lowerCamelCase ) > max_length and max_length > 0:
UpperCamelCase : str = os.path.dirname(lowerCamelCase )
UpperCamelCase : Any = str(hash(lowerCamelCase ) )
UpperCamelCase : Dict = filename[: max_length - len(lowerCamelCase ) - 8] + "..." + hashed_filename + ".lock"
return os.path.join(lowerCamelCase , lowerCamelCase )
else:
return path
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=-1 , lowerCamelCase=None ) -> Optional[int]:
'''simple docstring'''
from .file_utils import relative_to_absolute_path
super().__init__(lowerCamelCase , timeout=lowerCamelCase , max_filename_length=lowerCamelCase )
UpperCamelCase : List[str] = "\\\\?\\" + relative_to_absolute_path(self.lock_file )
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
UpperCamelCase : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
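# Open (creating or truncating) the lock file, then try to take a non-blocking lock on a single byte via msvcrt.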
try:
UpperCamelCase : str = os.open(self._lock_file , lowerCamelCase )
except OSError:
pass
else:
try:
msvcrt.locking(lowerCamelCase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(lowerCamelCase )
else:
UpperCamelCase : Dict = fd
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : str = self._lock_file_fd
UpperCamelCase : List[Any] = None
msvcrt.locking(lowerCamelCase , msvcrt.LK_UNLCK , 1 )
os.close(lowerCamelCase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase=-1 , lowerCamelCase=None ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : Optional[int] = os.statvfs(os.path.dirname(lowerCamelCase ) ).f_namemax
super().__init__(lowerCamelCase , timeout=lowerCamelCase , max_filename_length=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
UpperCamelCase : int = os.O_RDWR | os.O_CREAT | os.O_TRUNC
UpperCamelCase : Union[str, Any] = os.open(self._lock_file , lowerCamelCase )
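# LOCK_EX | LOCK_NB requests an exclusive lock without blocking; an OSError means another process already holds it.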
try:
fcntl.flock(lowerCamelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(lowerCamelCase )
else:
UpperCamelCase : Any = fd
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self._lock_file_fd
UpperCamelCase : str = None
fcntl.flock(lowerCamelCase , fcntl.LOCK_UN )
os.close(lowerCamelCase )
return None
class UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase : Dict = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
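# O_EXCL makes the open fail if the lock file already exists, which is what provides the (soft) locking here.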
try:
UpperCamelCase : Union[str, Any] = os.open(self._lock_file , lowerCamelCase )
except OSError:
pass
else:
UpperCamelCase : Dict = fd
return None
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
os.close(self._lock_file_fd )
UpperCamelCase : Optional[Any] = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
lowerCAmelCase_ = None
if msvcrt:
lowerCAmelCase_ = WindowsFileLock
elif fcntl:
lowerCAmelCase_ = UnixFileLock
else:
lowerCAmelCase_ = SoftFileLock
if warnings is not None:
warnings.warn('only soft file lock is available')
| 173
| 0
|
"""simple docstring"""
UpperCAmelCase_ : str = 8.314_4598
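# The constant above is the universal gas constant R in J/(mol*K).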
def _lowerCAmelCase(a : float , a : float ) -> float:
if temperature < 0:
raise Exception('''Temperature cannot be less than 0 K''' )
if molar_mass <= 0:
raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' )
else:
return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
UpperCAmelCase_ : Optional[Any] = 3_0_0
UpperCAmelCase_ : Tuple = 2_8
UpperCAmelCase_ : Optional[Any] = rms_speed_of_molecule(temperature, molar_mass)
print(f"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
| 721
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCAmelCase_ : int = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : Dict = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : int = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : str = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 165
| 0
|
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : Union[str, Any] ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Tuple ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = tokenizer.decode(greedy_ids[0] )
A_ = TextIteratorStreamer(UpperCAmelCase )
A_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
A_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
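# Generation runs in the background thread; iterating the streamer below yields decoded text pieces as they become available.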
A_ = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : List[str] ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase )
A_ = greedy_ids[:, input_ids.shape[1] :]
A_ = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase , skip_prompt=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=10 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
A_ = cs.out[:-1]
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def __A ( self : Optional[Any] ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
A_ = AutoTokenizer.from_pretrained("distilgpt2" )
A_ = AutoModelForCausalLM.from_pretrained("distilgpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = torch.ones((1, 5) , device=UpperCAmelCase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
A_ = TextStreamer(UpperCAmelCase , skip_special_tokens=UpperCAmelCase )
model.generate(UpperCAmelCase , max_new_tokens=1 , do_sample=UpperCAmelCase , streamer=UpperCAmelCase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
A_ = cs.out[:-1] # Remove the final "\n"
A_ = tokenizer(UpperCAmelCase , return_tensors="pt" )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def __A ( self : int ):
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
A_ = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2" ).to(UpperCAmelCase )
A_ = -1
A_ = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(UpperCAmelCase )
A_ = TextIteratorStreamer(UpperCAmelCase , timeout=0.001 )
A_ = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
A_ = Thread(target=model.generate , kwargs=UpperCAmelCase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(UpperCAmelCase ):
A_ = ""
for new_text in streamer:
streamer_text += new_text
| 86
|
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__a :List[Any] = get_logger()
__a :Optional[dict] = None
class _a ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase : int=None , UpperCAmelCase : List[str]=None , **UpperCAmelCase : List[Any] ):
super().__init__(features=UpperCAmelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCAmelCase , UpperCAmelCase ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(UpperCAmelCase )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
A_ = device if isinstance(UpperCAmelCase , UpperCAmelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
A_ = str(jax.devices()[0] )
A_ = jnp_array_kwargs
@staticmethod
def __A ( ):
import jax
return {str(UpperCAmelCase ): device for device in jax.devices()}
def __A ( self : Optional[int] , UpperCAmelCase : int ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , UpperCAmelCase ) and column:
if all(
isinstance(UpperCAmelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(UpperCAmelCase , axis=0 )
return column
def __A ( self : List[str] , UpperCAmelCase : str ):
import jax
import jax.numpy as jnp
if isinstance(UpperCAmelCase , (str, bytes, type(UpperCAmelCase )) ):
return value
elif isinstance(UpperCAmelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
A_ = {}
if isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
A_ = {"dtype": jnp.intaa}
else:
A_ = {"dtype": jnp.intaa}
elif isinstance(UpperCAmelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
A_ = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase , PIL.Image.Image ):
A_ = np.asarray(UpperCAmelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
A_ = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(UpperCAmelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def __A ( self : Any , UpperCAmelCase : Dict ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(UpperCAmelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(UpperCAmelCase , "__array__" ) and not isinstance(UpperCAmelCase , jax.Array ):
A_ = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
elif isinstance(UpperCAmelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase ) for substruct in data_struct] )
return self._tensorize(UpperCAmelCase )
def __A ( self : Tuple , UpperCAmelCase : dict ):
return map_nested(self._recursive_tensorize , UpperCAmelCase , map_list=UpperCAmelCase )
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_row(UpperCAmelCase )
A_ = self.python_features_decoder.decode_row(UpperCAmelCase )
return self.recursive_tensorize(UpperCAmelCase )
def __A ( self : Any , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_column(UpperCAmelCase )
A_ = self.python_features_decoder.decode_column(UpperCAmelCase , pa_table.column_names[0] )
A_ = self.recursive_tensorize(UpperCAmelCase )
A_ = self._consolidate(UpperCAmelCase )
return column
def __A ( self : Dict , UpperCAmelCase : pa.Table ):
A_ = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase )
A_ = self.python_features_decoder.decode_batch(UpperCAmelCase )
A_ = self.recursive_tensorize(UpperCAmelCase )
for column_name in batch:
A_ = self._consolidate(batch[column_name] )
return batch
| 86
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase : int = logging.get_logger(__name__)
_lowercase : Any = {
'Salesforce/codegen-350M-nl': 'https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json',
'Salesforce/codegen-350M-multi': 'https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json',
'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json',
'Salesforce/codegen-2B-nl': 'https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json',
'Salesforce/codegen-2B-multi': 'https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json',
'Salesforce/codegen-2B-mono': 'https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json',
'Salesforce/codegen-6B-nl': 'https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json',
'Salesforce/codegen-6B-multi': 'https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json',
'Salesforce/codegen-6B-mono': 'https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json',
'Salesforce/codegen-16B-nl': 'https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json',
'Salesforce/codegen-16B-multi': 'https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json',
'Salesforce/codegen-16B-mono': 'https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json',
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = "codegen"
a__ : Tuple = {
"max_position_embeddings": "n_positions",
"hidden_size": "n_embd",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : List[str] , _lowercase : Optional[int]=5_04_00 , _lowercase : List[str]=20_48 , _lowercase : Optional[int]=20_48 , _lowercase : Tuple=40_96 , _lowercase : Optional[Any]=28 , _lowercase : Tuple=16 , _lowercase : str=64 , _lowercase : Dict=None , _lowercase : Any="gelu_new" , _lowercase : Any=0.0 , _lowercase : Dict=0.0 , _lowercase : Dict=0.0 , _lowercase : str=1E-5 , _lowercase : Union[str, Any]=0.02 , _lowercase : List[str]=True , _lowercase : Dict=5_02_56 , _lowercase : str=5_02_56 , _lowercase : Any=False , **_lowercase : Optional[Any] , ):
__UpperCAmelCase = vocab_size
__UpperCAmelCase = n_ctx
__UpperCAmelCase = n_positions
__UpperCAmelCase = n_embd
__UpperCAmelCase = n_layer
__UpperCAmelCase = n_head
__UpperCAmelCase = n_inner
__UpperCAmelCase = rotary_dim
__UpperCAmelCase = activation_function
__UpperCAmelCase = resid_pdrop
__UpperCAmelCase = embd_pdrop
__UpperCAmelCase = attn_pdrop
__UpperCAmelCase = layer_norm_epsilon
__UpperCAmelCase = initializer_range
__UpperCAmelCase = use_cache
__UpperCAmelCase = bos_token_id
__UpperCAmelCase = eos_token_id
super().__init__(
bos_token_id=_lowercase , eos_token_id=_lowercase , tie_word_embeddings=_lowercase , **_lowercase )
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : int , _lowercase : PretrainedConfig , _lowercase : str = "default" , _lowercase : List[PatchingSpec] = None , _lowercase : bool = False , ):
super().__init__(_lowercase , task=_lowercase , patching_specs=_lowercase , use_past=_lowercase )
if not getattr(self._config , '''pad_token_id''' , _lowercase ):
# TODO: how to do that better?
__UpperCAmelCase = 0
@property
def a ( self : Optional[Any] ):
__UpperCAmelCase = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} )
if self.use_past:
self.fill_with_past_key_values_(_lowercase , direction='''inputs''' )
__UpperCAmelCase = {0: '''batch''', 1: '''past_sequence + sequence'''}
else:
__UpperCAmelCase = {0: '''batch''', 1: '''sequence'''}
return common_inputs
@property
def a ( self : Dict ):
return self._config.n_layer
@property
def a ( self : Tuple ):
return self._config.n_head
def a ( self : Tuple , _lowercase : PreTrainedTokenizer , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : bool = False , _lowercase : Optional[TensorType] = None , ):
__UpperCAmelCase = super(_lowercase , self ).generate_dummy_inputs(
_lowercase , batch_size=_lowercase , seq_length=_lowercase , is_pair=_lowercase , framework=_lowercase )
# We need to order the inputs in the way they appear in the forward()
__UpperCAmelCase = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__UpperCAmelCase , __UpperCAmelCase = common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__UpperCAmelCase = seqlen + 2
__UpperCAmelCase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__UpperCAmelCase = [
(torch.zeros(_lowercase ), torch.zeros(_lowercase )) for _ in range(self.num_layers )
]
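# One pair of zero-filled (key, value) tensors per layer stands in for the cached attention states of previous tokens.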
__UpperCAmelCase = common_inputs['''attention_mask''']
if self.use_past:
__UpperCAmelCase = ordered_inputs['''attention_mask'''].dtype
__UpperCAmelCase = torch.cat(
[ordered_inputs['''attention_mask'''], torch.ones(_lowercase , _lowercase , dtype=_lowercase )] , dim=1 )
return ordered_inputs
@property
def a ( self : Any ):
return 13
| 397
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
_lowercase : List[str] = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Tuple , snake_case_ :List[str] , snake_case_ :List[Any]=False , snake_case_ :List[Any]=True ):
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__UpperCAmelCase = cached_file(snake_case_ , snake_case_ , force_download=not use_cached_models )
__UpperCAmelCase = config_class.from_json_file(snake_case_ )
__UpperCAmelCase = True
__UpperCAmelCase = True
print(F'''Building TensorFlow model from configuration: {config}''' )
__UpperCAmelCase = model_class(snake_case_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__UpperCAmelCase = cached_file(
snake_case_ , snake_case_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__UpperCAmelCase = load_pytorch_checkpoint_in_tfa_model(snake_case_ , snake_case_ )
if compare_with_pt_model:
__UpperCAmelCase = tf_model(tf_model.dummy_inputs , training=snake_case_ ) # build the network
__UpperCAmelCase = torch.load(snake_case_ , map_location='''cpu''' )
__UpperCAmelCase = pt_model_class.from_pretrained(
pretrained_model_name_or_path=snake_case_ , config=snake_case_ , state_dict=snake_case_ )
with torch.no_grad():
__UpperCAmelCase = pt_model(**pt_model.dummy_inputs )
__UpperCAmelCase = pto[0].numpy()
__UpperCAmelCase = tfo[0].numpy()
__UpperCAmelCase = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(snake_case_ , save_format='''h5''' )
def lowercase__ ( snake_case_ :Union[str, Any] , snake_case_ :List[str] , snake_case_ :int=None , snake_case_ :Optional[int]=None , snake_case_ :List[str]=False , snake_case_ :Optional[int]=False , snake_case_ :Dict=False , snake_case_ :List[Any]=False , ):
if args_model_type is None:
__UpperCAmelCase = list(MODEL_CLASSES.keys() )
else:
__UpperCAmelCase = [args_model_type]
for j, model_type in enumerate(snake_case_ , start=1 ):
print('''=''' * 100 )
print(F''' Converting model type {j}/{len(snake_case_ )}: {model_type}''' )
print('''=''' * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__UpperCAmelCase = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__UpperCAmelCase = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(snake_case_ , snake_case_ ) , start=1 ):
print('''-''' * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
__UpperCAmelCase = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(snake_case_ )}: {model_shortcut_name} - model_type {model_type}''' )
print('''-''' * 100 )
if config_shortcut_name in aws_config_map:
__UpperCAmelCase = cached_file(snake_case_ , snake_case_ , force_download=not use_cached_models )
else:
__UpperCAmelCase = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__UpperCAmelCase = cached_file(snake_case_ , snake_case_ , force_download=not use_cached_models )
else:
__UpperCAmelCase = model_shortcut_name
if os.path.isfile(snake_case_ ):
__UpperCAmelCase = '''converted_model'''
convert_pt_checkpoint_to_tf(
model_type=snake_case_ , pytorch_checkpoint_path=snake_case_ , config_file=snake_case_ , tf_dump_path=os.path.join(snake_case_ , model_shortcut_name + '''-tf_model.h5''' ) , compare_with_pt_model=snake_case_ , )
if remove_cached_files:
os.remove(snake_case_ )
os.remove(snake_case_ )
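# Example invocation (hypothetical script name and paths, shown only to illustrate the flags defined below):
#   python convert_pytorch_checkpoint_to_tf2.py --model_type bert \
#       --pytorch_checkpoint_path bert-base-cased --config_file bert-base-cased \
#       --tf_dump_path ./tf-dump --compare_with_pt_model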
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f"""Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and """
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
| 397
| 1
|
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F'''{bindir}/../../examples/pytorch/translation'''):
from run_translation import main # noqa
set_seed(42)
lowercase__ : Optional[int] = "sshleifer/student_marian_en_ro_6_1"
lowercase__ : List[str] = "sshleifer/tiny-mbart"
@require_torch
class lowerCamelCase ( TestCasePlus ):
'''simple docstring'''
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : Optional[Any]=False , UpperCAmelCase__ : Optional[int]=None , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Dict=True , UpperCAmelCase__ : Optional[int]=True , UpperCAmelCase__ : Dict=True , ) ->Union[str, Any]:
UpperCAmelCase_ = self.run_trainer(
eval_steps=1 , max_len=12 , model_name=UpperCAmelCase__ , num_train_epochs=1 , distributed=UpperCAmelCase__ , extra_args_str=UpperCAmelCase__ , predict_with_generate=UpperCAmelCase__ , do_train=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , do_predict=UpperCAmelCase__ , )
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(UpperCAmelCase__ , '''trainer_state.json''' ) ).log_history
if not do_eval:
return
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
if predict_with_generate:
assert "eval_bleu" in first_step_stats
UpperCAmelCase_ = eval_metrics[-1]
assert isinstance(last_step_stats['''eval_bleu'''] , UpperCAmelCase__ )
assert not math.isnan(float(last_step_stats['''eval_loss'''] ) ), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def lowerCAmelCase__ ( self : List[Any] ) ->Tuple:
self.run_seqaseq_quick()
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Tuple ) ->List[str]:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : Optional[Any] ) ->int:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str='''--sharded_ddp simple''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : Optional[Any] ) ->Union[str, Any]:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str='''--sharded_ddp simple --fp16''' )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : Optional[Any] ) ->str:
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str='''--sharded_ddp zero_dp_2''' , predict_with_generate=UpperCAmelCase__ )
@unittest.skip('''Requires an update of the env running those tests''' )
@require_torch_multi_gpu
@require_fairscale
def lowerCAmelCase__ ( self : Optional[int] ) ->str:
self.run_seqaseq_quick(
distributed=UpperCAmelCase__ , extra_args_str='''--sharded_ddp zero_dp_2 --fp16''' , predict_with_generate=UpperCAmelCase__ )
@require_apex
@require_torch_gpu
def lowerCAmelCase__ ( self : Optional[int] ) ->List[Any]:
# XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
# program and it breaks other tests that run from the same pytest worker, therefore until this is
# sorted out it must be run only in an external program, that is distributed=True in this
# test and only under one or more gpus - if we want cpu will need to make a special test
#
# specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
# 2nd main() call it botches the future eval.
#
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=UpperCAmelCase__ , extra_args_str='''--fp16 --fp16_backend=apex''' )
@parameterized.expand(['''base''', '''low''', '''high''', '''mixed'''] )
@require_torch_multi_gpu
def lowerCAmelCase__ ( self : str , UpperCAmelCase__ : Optional[Any] ) ->Optional[int]:
# as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
UpperCAmelCase_ = {
# test with the default log_level - should be info and thus log info once
'''base''': {'''extra_args_str''': '''''', '''n_matches''': 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
'''low''': {'''extra_args_str''': '''--log_level debug --log_level_replica debug''', '''n_matches''': 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
'''high''': {'''extra_args_str''': '''--log_level error --log_level_replica debug''', '''n_matches''': 1},
# test with high log_level and log_level_replica - should be quiet on all processes
'''mixed''': {'''extra_args_str''': '''--log_level error --log_level_replica error''', '''n_matches''': 0},
}
UpperCAmelCase_ = experiments[experiment_id]
UpperCAmelCase_ = {'''distributed''': True, '''predict_with_generate''': False, '''do_eval''': False, '''do_predict''': False}
UpperCAmelCase_ = '''Running training'''
with CaptureStderr() as cl:
self.run_seqaseq_quick(**UpperCAmelCase__ , extra_args_str=data['''extra_args_str'''] )
UpperCAmelCase_ = len(re.findall(UpperCAmelCase__ , cl.err ) )
self.assertEqual(UpperCAmelCase__ , data['''n_matches'''] )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->List[str]:
UpperCAmelCase_ = self.run_trainer(
eval_steps=2 , max_len=128 , model_name=UpperCAmelCase__ , learning_rate=3e-4 , num_train_epochs=10 , distributed=UpperCAmelCase__ , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(os.path.join(UpperCAmelCase__ , '''trainer_state.json''' ) ).log_history
UpperCAmelCase_ = [log for log in logs if '''eval_loss''' in log.keys()]
UpperCAmelCase_ = eval_metrics[0]
UpperCAmelCase_ = eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
assert isinstance(last_step_stats['''eval_bleu'''] , UpperCAmelCase__ )
# test if do_predict saves generations and metrics
UpperCAmelCase_ = os.listdir(UpperCAmelCase__ )
UpperCAmelCase_ = {os.path.basename(UpperCAmelCase__ ) for p in contents}
assert "generated_predictions.txt" in contents
assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def lowerCAmelCase__ ( self : List[Any] ) ->Union[str, Any]:
from transformers.training_args import OptimizerNames
def train_and_return_metrics(UpperCAmelCase__ : str ) -> Tuple[int, float]:
UpperCAmelCase_ = '''--skip_memory_metrics 0'''
UpperCAmelCase_ = self.run_trainer(
max_len=128 , model_name=UpperCAmelCase__ , learning_rate=3e-4 , num_train_epochs=1 , optim=UpperCAmelCase__ , distributed=UpperCAmelCase__ , extra_args_str=UpperCAmelCase__ , do_eval=UpperCAmelCase__ , do_predict=UpperCAmelCase__ , n_gpus_to_use=1 , )
# Check metrics
UpperCAmelCase_ = TrainerState.load_from_json(Path(UpperCAmelCase__ , '''trainer_state.json''' ) ).log_history
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_peaked_delta'''] / 2**20 )
UpperCAmelCase_ = int(logs[0]['''train_mem_gpu_alloc_delta'''] / 2**20 )
UpperCAmelCase_ = logs[0]['''train_loss''']
return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value )
UpperCAmelCase_ = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_peak_mem_orig + gpu_alloc_mem_orig
UpperCAmelCase_ = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
UpperCAmelCase_ = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
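        # Worked check of the figure above (assumed numbers mirroring the comment):
        #   25e6 quantized params * (8 - 2) bytes of optimizer state saved per param = 150e6 bytes (~143 MiB),
        #   so the 120MB threshold below leaves headroom for GPU-to-GPU variation.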
UpperCAmelCase_ = 120
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
self.assertGreater(
UpperCAmelCase__ , UpperCAmelCase__ , '''should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got'''
f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""" , )
self.assertGreater(
UpperCAmelCase__ , UpperCAmelCase__ , '''should use ~150MB less total gpu memory with BNB, compared to without it for this model but got'''
f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""" , )
self.assertEqual(
UpperCAmelCase__ , UpperCAmelCase__ , f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""" )
def lowerCAmelCase__ ( self : List[Any] , UpperCAmelCase__ : int , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : float = 3e-3 , UpperCAmelCase__ : str = "adafactor" , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : str = None , UpperCAmelCase__ : int = 0 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : int = None , ) ->str:
UpperCAmelCase_ = self.test_file_dir / '''../fixtures/tests_samples/wmt_en_ro'''
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(UpperCAmelCase__ )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(UpperCAmelCase__ )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
UpperCAmelCase_ = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(UpperCAmelCase__ )}
""".split()
UpperCAmelCase_ = '''
--do_predict
'''.split()
UpperCAmelCase_ = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
if n_gpus_to_use is None:
UpperCAmelCase_ = get_gpu_count()
UpperCAmelCase_ = get_torch_dist_unique_port()
UpperCAmelCase_ = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
UpperCAmelCase_ = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(UpperCAmelCase__ , env=self.get_env() )
else:
UpperCAmelCase_ = ['''run_translation.py'''] + args
with patch.object(UpperCAmelCase__ , '''argv''' , UpperCAmelCase__ ):
main()
return output_dir
| 390
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class lowerCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
lowerCAmelCase__ = ['''onnx''']
def __init__( self : List[Any] , *UpperCAmelCase__ : Union[str, Any] , **UpperCAmelCase__ : Tuple ) ->Union[str, Any]:
requires_backends(self , ['''onnx'''] )
@classmethod
def lowerCAmelCase__ ( cls : List[Any] , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : Optional[Any] ) ->Any:
requires_backends(cls , ['''onnx'''] )
@classmethod
def lowerCAmelCase__ ( cls : Any , *UpperCAmelCase__ : Tuple , **UpperCAmelCase__ : List[Any] ) ->Optional[Any]:
requires_backends(cls , ['''onnx'''] )
| 390
| 1
|
'''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
flip_channel_order,
get_resize_output_image_size,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging
if is_vision_available():
import PIL
if is_torch_available():
import torch
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( BaseImageProcessor ):
lowerCamelCase_ =['pixel_values']
def __init__( self : Any , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , __lowerCAmelCase : Union[int, float] = 1 / 255 , __lowerCAmelCase : bool = True , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = True , **__lowerCAmelCase : str , ) -> None:
super().__init__(**__lowerCAmelCase)
lowercase_ = size if size is not None else {"""shortest_edge""": 224}
lowercase_ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase)
lowercase_ = crop_size if crop_size is not None else {"""height""": 256, """width""": 256}
lowercase_ = get_size_dict(__lowerCAmelCase , param_name="crop_size")
lowercase_ = do_resize
lowercase_ = size
lowercase_ = resample
lowercase_ = do_rescale
lowercase_ = rescale_factor
lowercase_ = do_center_crop
lowercase_ = crop_size
lowercase_ = do_flip_channel_order
def __UpperCAmelCase ( self : int , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : PILImageResampling = PIL.Image.BILINEAR , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : str , ) -> np.ndarray:
lowercase_ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase)
if "shortest_edge" not in size:
raise ValueError(F'The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}')
lowercase_ = get_resize_output_image_size(__lowerCAmelCase , size=size["shortest_edge"] , default_to_square=__lowerCAmelCase)
return resize(__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def __UpperCAmelCase ( self : List[Any] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Dict[str, int] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Any , ) -> np.ndarray:
lowercase_ = get_size_dict(__lowerCAmelCase)
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}')
return center_crop(__lowerCAmelCase , size=(size["height"], size["width"]) , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def __UpperCAmelCase ( self : int , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Union[int, float] , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **__lowerCAmelCase : Union[str, Any] , ) -> Optional[Any]:
return rescale(__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def __UpperCAmelCase ( self : str , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None) -> np.ndarray:
return flip_channel_order(__lowerCAmelCase , data_format=__lowerCAmelCase)
def __UpperCAmelCase ( self : List[str] , __lowerCAmelCase : ImageInput , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : PILImageResampling = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : float = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Dict[str, int] = None , __lowerCAmelCase : bool = None , __lowerCAmelCase : Optional[Union[str, TensorType]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : Dict , ) -> PIL.Image.Image:
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = resample if resample is not None else self.resample
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase_ = (
do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order
)
lowercase_ = size if size is not None else self.size
lowercase_ = get_size_dict(__lowerCAmelCase , default_to_square=__lowerCAmelCase)
lowercase_ = crop_size if crop_size is not None else self.crop_size
lowercase_ = get_size_dict(__lowerCAmelCase , param_name="crop_size")
lowercase_ = make_list_of_images(__lowerCAmelCase)
if not valid_images(__lowerCAmelCase):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray.")
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True.")
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True.")
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True.")
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(__lowerCAmelCase) for image in images]
if do_resize:
lowercase_ = [self.resize(image=__lowerCAmelCase , size=__lowerCAmelCase , resample=__lowerCAmelCase) for image in images]
if do_center_crop:
lowercase_ = [self.center_crop(image=__lowerCAmelCase , size=__lowerCAmelCase) for image in images]
if do_rescale:
lowercase_ = [self.rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase) for image in images]
# the pretrained checkpoints assume images are BGR, not RGB
if do_flip_channel_order:
lowercase_ = [self.flip_channel_order(image=__lowerCAmelCase) for image in images]
lowercase_ = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase) for image in images]
lowercase_ = {"""pixel_values""": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase)
def __UpperCAmelCase ( self : int , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : List[Tuple] = None) -> List[str]:
lowercase_ = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__lowerCAmelCase) != len(__lowerCAmelCase):
raise ValueError(
"Make sure that you pass in as many target sizes as the batch dimension of the logits")
if is_torch_tensor(__lowerCAmelCase):
lowercase_ = target_sizes.numpy()
lowercase_ = []
for idx in range(len(__lowerCAmelCase)):
lowercase_ = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode="bilinear" , align_corners=__lowerCAmelCase)
lowercase_ = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(__lowerCAmelCase)
else:
lowercase_ = logits.argmax(dim=1)
lowercase_ = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 714
|
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase_ : Any = logging.get_logger(__name__)
class lowercase ( BaseImageProcessor ):
lowerCamelCase_ =['pixel_values']
def __init__( self : Optional[int] , __lowerCAmelCase : bool = True , __lowerCAmelCase : int = 32 , __lowerCAmelCase : List[str]=PILImageResampling.BILINEAR , __lowerCAmelCase : bool = True , **__lowerCAmelCase : Union[str, Any] , ) -> None:
lowercase_ = do_resize
lowercase_ = do_rescale
lowercase_ = size_divisor
lowercase_ = resample
super().__init__(**__lowerCAmelCase)
def __UpperCAmelCase ( self : List[str] , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : int , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Optional[ChannelDimension] = None , **__lowerCAmelCase : Any) -> np.ndarray:
lowercase_ , lowercase_ = get_image_size(__lowerCAmelCase)
# Rounds the height and width down to the closest multiple of size_divisor
lowercase_ = height // size_divisor * size_divisor
lowercase_ = width // size_divisor * size_divisor
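        # e.g. with size_divisor=32, a 513x257 (height x width) image is resized down to 512x256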
lowercase_ = resize(__lowerCAmelCase , (new_h, new_w) , resample=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase)
return image
def __UpperCAmelCase ( self : str , __lowerCAmelCase : np.ndarray , __lowerCAmelCase : float , __lowerCAmelCase : Optional[ChannelDimension] = None , **__lowerCAmelCase : int) -> np.ndarray:
return rescale(image=__lowerCAmelCase , scale=__lowerCAmelCase , data_format=__lowerCAmelCase , **__lowerCAmelCase)
def __UpperCAmelCase ( self : Optional[Any] , __lowerCAmelCase : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : str=None , __lowerCAmelCase : Optional[bool] = None , __lowerCAmelCase : Optional[Union[TensorType, str]] = None , __lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **__lowerCAmelCase : List[str] , ) -> BatchFeature:
lowercase_ = do_resize if do_resize is not None else self.do_resize
lowercase_ = do_rescale if do_rescale is not None else self.do_rescale
lowercase_ = size_divisor if size_divisor is not None else self.size_divisor
lowercase_ = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing")
lowercase_ = make_list_of_images(__lowerCAmelCase)
if not valid_images(__lowerCAmelCase):
raise ValueError("Invalid image(s)")
# All transformations expect numpy arrays.
lowercase_ = [to_numpy_array(__lowerCAmelCase) for img in images]
if do_resize:
lowercase_ = [self.resize(__lowerCAmelCase , size_divisor=__lowerCAmelCase , resample=__lowerCAmelCase) for image in images]
if do_rescale:
lowercase_ = [self.rescale(__lowerCAmelCase , scale=1 / 255) for image in images]
lowercase_ = [to_channel_dimension_format(__lowerCAmelCase , __lowerCAmelCase) for image in images]
lowercase_ = {"pixel_values": images}
return BatchFeature(data=__lowerCAmelCase , tensor_type=__lowerCAmelCase)
| 461
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["GLPNFeatureExtractor"]
UpperCamelCase = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
"GLPNForDepthEstimation",
"GLPNLayer",
"GLPNModel",
"GLPNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
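    # _LazyModule defers the real submodule imports until an attribute is first accessed,
    # so e.g. GLPNModel only pulls in torch when it is actually used.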
| 66
|
def __magic_name__ ( equation1 , equation2 ) -> tuple[float, float]:
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError('Please enter a valid equation.' )
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError('Both a & b of two equations can\'t be zero.' )
    # Extract the coefficients (each equation is [a, b, c] for a*x + b*y = c)
    a1 , b1 , c1 = equation1
    a2 , b2 , c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError('Infinite solutions. (Consistent system)' )
        else:
            raise ValueError('No solution. (Inconsistent system)' )
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution (Inconsistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-Trivial Solution (Consistent system)
            return (x, y)
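# Example usage of the solver above (illustrative values, not part of the original file):
#   __magic_name__([2, 3, 7], [1, -1, 1]) solves 2x + 3y = 7 and x - y = 1 and returns (2.0, 1.0);
#   two parallel, distinct lines raise ValueError('No solution. (Inconsistent system)').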
| 66
| 1
|
'''simple docstring'''
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class __lowercase :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
A : int = False
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if not self.initialized:
A : str = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
A : Tuple = True
def snake_case ( self ) -> int:
self.retriever.index.init_index()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
A : str = self.retriever._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return doc_ids, retrieved_doc_embeds
class __lowercase ( RagRetriever ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
if index is not None and index.is_initialized() and len(__UpperCAmelCase ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
A : Optional[int] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for worker in self.retrieval_workers
] )
def snake_case ( self ) -> Union[str, Any]:
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A : List[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
A : Union[str, Any] = ray.get(random_worker.retrieve.remote(__UpperCAmelCase , __UpperCAmelCase ) )
else:
A : Any = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase )
@classmethod
def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Any:
return super(__UpperCAmelCase , cls ).get_tokenizers(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
@classmethod
def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict:
A : int = kwargs.pop('''config''' , __UpperCAmelCase ) or RagConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
A : Tuple = RagTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
A : Any = rag_tokenizer.question_encoder
A : int = rag_tokenizer.generator
if indexed_dataset is not None:
A : Optional[int] = '''custom'''
A : Tuple = CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase )
else:
A : Union[str, Any] = cls._build_index(__UpperCAmelCase )
return cls(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , retrieval_workers=__UpperCAmelCase , index=__UpperCAmelCase , )
| 701
|
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
logger = logging.getLogger(__name__)
class __lowercase :
"""simple docstring"""
def __init__( self ) -> Optional[int]:
A : int = False
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
if not self.initialized:
A : str = RagRetriever(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
A : Tuple = True
def snake_case ( self ) -> int:
self.retriever.index.init_index()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> str:
A , A : str = self.retriever._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return doc_ids, retrieved_doc_embeds
class __lowercase ( RagRetriever ):
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
if index is not None and index.is_initialized() and len(__UpperCAmelCase ) > 0:
raise ValueError(
'''When using Ray for distributed fine-tuning, '''
'''you\'ll need to provide the paths instead, '''
'''as the dataset and the index are loaded '''
'''separately. More info in examples/rag/use_own_knowledge_dataset.py ''' )
super().__init__(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , index=__UpperCAmelCase , init_retrieval=__UpperCAmelCase , )
A : Optional[int] = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
for worker in self.retrieval_workers
] )
def snake_case ( self ) -> Union[str, Any]:
logger.info('''initializing retrieval''' )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
A : List[Any] = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
A , A : Union[str, Any] = ray.get(random_worker.retrieve.remote(__UpperCAmelCase , __UpperCAmelCase ) )
else:
A , A : Any = self._main_retrieve(__UpperCAmelCase , __UpperCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__UpperCAmelCase )
@classmethod
def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Any:
return super(__UpperCAmelCase , cls ).get_tokenizers(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
@classmethod
def snake_case ( cls , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , **__UpperCAmelCase ) -> Dict:
A : int = kwargs.pop('''config''' , __UpperCAmelCase ) or RagConfig.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase )
A : Tuple = RagTokenizer.from_pretrained(__UpperCAmelCase , config=__UpperCAmelCase )
A : Any = rag_tokenizer.question_encoder
A : int = rag_tokenizer.generator
if indexed_dataset is not None:
A : Optional[int] = '''custom'''
A : Tuple = CustomHFIndex(config.retrieval_vector_size , __UpperCAmelCase )
else:
A : Union[str, Any] = cls._build_index(__UpperCAmelCase )
return cls(
__UpperCAmelCase , question_encoder_tokenizer=__UpperCAmelCase , generator_tokenizer=__UpperCAmelCase , retrieval_workers=__UpperCAmelCase , index=__UpperCAmelCase , )
| 423
| 0
|
# flake8: noqa
# Lint as: python3
__all__ = [
"VerificationMode",
"Version",
"disable_progress_bar",
"enable_progress_bar",
"is_progress_bar_enabled",
"experimental",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 10
|
import sys
from collections import defaultdict
class Heap:
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = (
heap[start],
positions[start],
)
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(_snake_case(adjacency_list))
| 10
| 1
|
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _snake_case :
@staticmethod
def lowercase__ ( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_):
'''simple docstring'''
pass
@is_pipeline_test
@require_torch
@require_vision
class _snake_case ( unittest.TestCase ):
__lowerCAmelCase : Dict = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : Any = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
lowercase__ : Any = [
{
"""image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""),
"""question""": """How many cats are there?""",
},
{
"""image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""",
"""question""": """How many cats are there?""",
},
]
return vqa_pipeline, examples
def lowercase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_):
'''simple docstring'''
lowercase__ : str = vqa_pipeline(SCREAMING_SNAKE_CASE_ , top_k=1)
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [
[{"""score""": ANY(SCREAMING_SNAKE_CASE_), """answer""": ANY(SCREAMING_SNAKE_CASE_)}],
[{"""score""": ANY(SCREAMING_SNAKE_CASE_), """answer""": ANY(SCREAMING_SNAKE_CASE_)}],
] , )
@require_torch
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : Optional[Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""")
lowercase__ : Optional[int] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase__ : Dict = """How many cats are there?"""
lowercase__ : Optional[int] = vqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question="""How many cats are there?""" , top_k=2)
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [{"""score""": ANY(SCREAMING_SNAKE_CASE_), """answer""": ANY(SCREAMING_SNAKE_CASE_)}, {"""score""": ANY(SCREAMING_SNAKE_CASE_), """answer""": ANY(SCREAMING_SNAKE_CASE_)}])
lowercase__ : Optional[int] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
SCREAMING_SNAKE_CASE_ , [{"""score""": ANY(SCREAMING_SNAKE_CASE_), """answer""": ANY(SCREAMING_SNAKE_CASE_)}, {"""score""": ANY(SCREAMING_SNAKE_CASE_), """answer""": ANY(SCREAMING_SNAKE_CASE_)}])
@slow
@require_torch
def lowercase__ ( self):
'''simple docstring'''
lowercase__ : List[Any] = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""")
lowercase__ : Optional[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
lowercase__ : List[Any] = """How many cats are there?"""
lowercase__ : int = vqa_pipeline(image=SCREAMING_SNAKE_CASE_ , question=SCREAMING_SNAKE_CASE_ , top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}])
lowercase__ : Any = vqa_pipeline({"""image""": image, """question""": question} , top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}])
lowercase__ : List[Any] = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2)
self.assertEqual(
nested_simplify(SCREAMING_SNAKE_CASE_ , decimals=4) , [[{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""")
def lowercase__ ( self):
'''simple docstring'''
pass
| 715
|
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
lowerCamelCase__ : Optional[int] = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = """facebook"""
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
lowerCamelCase__ : Optional[int] = """allenai"""
def rewrite_dict_keys( lowercase_ ) -> dict:
    '''simple docstring'''
    # (1) strip the "@@" word-continuation markers, (2) add the "</w>" word-ending marker to unbroken words
    da = dict(
        (re.sub(R"""@@$""" , """""" , k ), v) if k.endswith("""@@""" ) else (re.sub(R"""$""" , """</w>""" , k ), v)
        for k, v in lowercase_.items()
    )
    keep_keys = """<s> <pad> </s> <unk>""".split()
    # restore the special tokens
    for k in keep_keys:
        del da[F'{k}</w>']
        da[k] = lowercase_[k] # restore
    return da
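# Example of the rewrite above (illustrative vocabulary, not taken from a real fairseq dict):
#   {"<s>": 0, "hug@@": 5, "hugs": 7}  ->  {"<s>": 0, "hug": 5, "hugs</w>": 7}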
def UpperCamelCase ( lowercase_ , lowercase_ ) -> Optional[int]:
'''simple docstring'''
assert os.path.exists(lowercase_ )
os.makedirs(lowercase_ , exist_ok=lowercase_ )
print(F'Writing results to {pytorch_dump_folder_path}' )
# handle various types of models
lowercase__ : Optional[Any] = basename(lowercase_ )
lowercase__ : Union[str, Any] = dirname(lowercase_ )
lowercase__ : Tuple = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
lowercase__ : Optional[Any] = cls.hub_models()
lowercase__ : str = {"""bpe""": """fastbpe""", """tokenizer""": """moses"""}
lowercase__ : Tuple = """."""
# note: since the model dump is old, fairseq has upgraded its model some
# time later, and it does a whole lot of rewrites and splits on the saved
# weights, therefore we can't use torch.load() directly on the model file.
# see: upgrade_state_dict(state_dict) in fairseq_model.py
print(F'using checkpoint {checkpoint_file}' )
lowercase__ : Dict = hub_utils.from_pretrained(
lowercase_ , lowercase_ , lowercase_ , archive_map=lowercase_ , **lowercase_ )
lowercase__ : Union[str, Any] = vars(chkpt["""args"""]["""model"""] )
lowercase__ : Union[str, Any] = args["""source_lang"""]
lowercase__ : Dict = args["""target_lang"""]
lowercase__ : List[str] = dirname(lowercase_ )
lowercase__ : Tuple = basename(lowercase_ )
# dicts
lowercase__ : Any = os.path.join(lowercase_ , F'dict.{src_lang}.txt' )
lowercase__ : Union[str, Any] = os.path.join(lowercase_ , F'dict.{tgt_lang}.txt' )
lowercase__ : int = Dictionary.load(lowercase_ )
lowercase__ : int = rewrite_dict_keys(src_dict.indices )
lowercase__ : Optional[int] = len(lowercase_ )
lowercase__ : Any = os.path.join(lowercase_ , """vocab-src.json""" )
print(F'Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# detect whether this is a do_lower_case situation, which can be derived by checking whether we
# have at least one uppercase letter in the source vocab
lowercase__ : List[Any] = True
for k in src_vocab.keys():
if not k.islower():
lowercase__ : Any = False
break
lowercase__ : Optional[Any] = Dictionary.load(lowercase_ )
lowercase__ : List[str] = rewrite_dict_keys(tgt_dict.indices )
lowercase__ : Union[str, Any] = len(lowercase_ )
lowercase__ : Tuple = os.path.join(lowercase_ , """vocab-tgt.json""" )
print(F'Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# merges_file (bpecodes)
lowercase__ : str = os.path.join(lowercase_ , VOCAB_FILES_NAMES["""merges_file"""] )
for fn in ["bpecodes", "code"]: # older fairseq called the merges file "code"
lowercase__ : Tuple = os.path.join(lowercase_ , lowercase_ )
if os.path.exists(lowercase_ ):
break
with open(lowercase_ , encoding="""utf-8""" ) as fin:
lowercase__ : List[str] = fin.read()
lowercase__ : List[str] = re.sub(R""" \d+$""" , """""" , lowercase_ , 0 , re.M ) # remove frequency number
print(F'Generating {merges_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as fout:
fout.write(lowercase_ )
# model config
lowercase__ : int = os.path.join(lowercase_ , """config.json""" )
# validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
# may have to modify the tokenizer if a different type is used by a future model
assert args["bpe"] == "fastbpe", F'need to extend tokenizer to support bpe={args["bpe"]}'
assert args["tokenizer"] == "moses", F'need to extend tokenizer to support bpe={args["tokenizer"]}'
lowercase__ : List[Any] = {
"""architectures""": ["""FSMTForConditionalGeneration"""],
"""model_type""": """fsmt""",
"""activation_dropout""": args["""activation_dropout"""],
"""activation_function""": """relu""",
"""attention_dropout""": args["""attention_dropout"""],
"""d_model""": args["""decoder_embed_dim"""],
"""dropout""": args["""dropout"""],
"""init_std""": 0.02,
"""max_position_embeddings""": args["""max_source_positions"""],
"""num_hidden_layers""": args["""encoder_layers"""],
"""src_vocab_size""": src_vocab_size,
"""tgt_vocab_size""": tgt_vocab_size,
"""langs""": [src_lang, tgt_lang],
"""encoder_attention_heads""": args["""encoder_attention_heads"""],
"""encoder_ffn_dim""": args["""encoder_ffn_embed_dim"""],
"""encoder_layerdrop""": args["""encoder_layerdrop"""],
"""encoder_layers""": args["""encoder_layers"""],
"""decoder_attention_heads""": args["""decoder_attention_heads"""],
"""decoder_ffn_dim""": args["""decoder_ffn_embed_dim"""],
"""decoder_layerdrop""": args["""decoder_layerdrop"""],
"""decoder_layers""": args["""decoder_layers"""],
"""bos_token_id""": 0,
"""pad_token_id""": 1,
"""eos_token_id""": 2,
"""is_encoder_decoder""": True,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_all_embeddings"""],
}
# good hparam defaults to start with
lowercase__ : str = 5
lowercase__ : Union[str, Any] = False
if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
lowercase__ : Any = best_score_hparams[model_dir]["""length_penalty"""]
else:
lowercase__ : int = 1.0
print(F'Generating {fsmt_model_config_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# tokenizer config
lowercase__ : Tuple = os.path.join(lowercase_ , lowercase_ )
lowercase__ : Optional[Any] = {
"""langs""": [src_lang, tgt_lang],
"""model_max_length""": 10_24,
"""do_lower_case""": do_lower_case,
}
print(F'Generating {fsmt_tokenizer_config_file}' )
with open(lowercase_ , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(lowercase_ , ensure_ascii=lowercase_ , indent=lowercase_ ) )
# model
lowercase__ : Union[str, Any] = chkpt["""models"""][0]
lowercase__ : Dict = model.state_dict()
# rename keys to start with 'model.'
lowercase__ : Dict = OrderedDict(("""model.""" + k, v) for k, v in model_state_dict.items() )
# remove unneeded keys
lowercase__ : int = [
"""model.model""",
"""model.encoder.version""",
"""model.decoder.version""",
"""model.encoder_embed_tokens.weight""",
"""model.decoder_embed_tokens.weight""",
"""model.encoder.embed_positions._float_tensor""",
"""model.decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
model_state_dict.pop(lowercase_ , lowercase_ )
lowercase__ : Dict = FSMTConfig.from_pretrained(lowercase_ )
lowercase__ : Optional[Any] = FSMTForConditionalGeneration(lowercase_ )
# check that it loads ok
model_new.load_state_dict(lowercase_ , strict=lowercase_ )
# save
lowercase__ : Optional[int] = os.path.join(lowercase_ , lowercase_ )
print(F'Generating {pytorch_weights_dump_path}' )
torch.save(lowercase_ , lowercase_ )
print("""Conversion is done!""" )
print("""\nLast step is to upload the files to s3""" )
print(F'cd {data_root}' )
print(F'transformers-cli upload {model_dir}' )
if __name__ == "__main__":
lowerCamelCase__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ : List[str] = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
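# Hedged example invocation of this conversion script (the script name and paths below are
# placeholders, not real files):
#
#   python convert_fsmt_checkpoint.py \
#       --fsmt_checkpoint_path ./wmt19-de-en/model.pt \
#       --pytorch_dump_folder_path ./converted/wmt19-de-en
#
# As the help text above notes, the checkpoint's directory is expected to also hold the fairseq
# dicts and bpecodes files, which the conversion function reads alongside the model weights.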
| 495
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """pytorch""",
"""script""": """run_ddp.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6},
},
{
"""framework""": """tensorflow""",
"""script""": """run_tf_dist.py""",
"""model_name_or_path""": """distilbert-base-cased""",
"""instance_type""": """ml.p3.16xlarge""",
"""results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7},
},
] )
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _lowerCamelCase ( self ):
if self.framework == "pytorch":
subprocess.run(
f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=__lowerCAmelCase , )
assert hasattr(self , """env""" )
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = f"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}"""
# distributed data settings
UpperCamelCase__ = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=__lowerCAmelCase , instance_count=__lowerCAmelCase , instance_type=self.instance_type , debugger_hook_config=__lowerCAmelCase , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=__lowerCAmelCase , py_version="""py36""" , )
def _lowerCamelCase ( self , __lowerCAmelCase ):
TrainingJobAnalytics(__lowerCAmelCase ).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(2,)] )
def _lowerCamelCase ( self , __lowerCAmelCase ):
# create estimator
UpperCamelCase__ = self.create_estimator(__lowerCAmelCase )
# run training
estimator.fit()
# result dataframe
UpperCamelCase__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
UpperCamelCase__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCamelCase__ = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f"""{estimator.latest_training_job.name}.json""" , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , __lowerCAmelCase )
| 619
|
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
UpperCamelCase__ = {
"vocab_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/vocab.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/vocab.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/vocab.json",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json"
),
},
"merges_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/merges.txt",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/merges.txt",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/merges.txt",
"roberta-base-openai-detector": "https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt",
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt"
),
},
"tokenizer_file": {
"roberta-base": "https://huggingface.co/roberta-base/resolve/main/tokenizer.json",
"roberta-large": "https://huggingface.co/roberta-large/resolve/main/tokenizer.json",
"roberta-large-mnli": "https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json",
"distilroberta-base": "https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json",
"roberta-base-openai-detector": (
"https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json"
),
"roberta-large-openai-detector": (
"https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json"
),
},
}
UpperCamelCase__ = {
"roberta-base": 512,
"roberta-large": 512,
"roberta-large-mnli": 512,
"distilroberta-base": 512,
"roberta-base-openai-detector": 512,
"roberta-large-openai-detector": 512,
}
class __SCREAMING_SNAKE_CASE ( _a ):
snake_case : Optional[Any] = VOCAB_FILES_NAMES
snake_case : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
snake_case : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case : Dict = ["""input_ids""", """attention_mask"""]
snake_case : List[Any] = RobertaTokenizer
def __init__( self , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase="replace" , __lowerCAmelCase="<s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="<mask>" , __lowerCAmelCase=False , __lowerCAmelCase=True , **__lowerCAmelCase , ):
super().__init__(
__lowerCAmelCase , __lowerCAmelCase , tokenizer_file=__lowerCAmelCase , errors=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , sep_token=__lowerCAmelCase , cls_token=__lowerCAmelCase , unk_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , mask_token=__lowerCAmelCase , add_prefix_space=__lowerCAmelCase , trim_offsets=__lowerCAmelCase , **__lowerCAmelCase , )
UpperCamelCase__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , __lowerCAmelCase ) != add_prefix_space:
UpperCamelCase__ = getattr(__lowerCAmelCase , pre_tok_state.pop("""type""" ) )
UpperCamelCase__ = add_prefix_space
UpperCamelCase__ = pre_tok_class(**__lowerCAmelCase )
UpperCamelCase__ = add_prefix_space
UpperCamelCase__ = """post_processor"""
UpperCamelCase__ = getattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
if tokenizer_component_instance:
UpperCamelCase__ = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
UpperCamelCase__ = tuple(state["""sep"""] )
if "cls" in state:
UpperCamelCase__ = tuple(state["""cls"""] )
UpperCamelCase__ = False
if state.get("""add_prefix_space""" , __lowerCAmelCase ) != add_prefix_space:
UpperCamelCase__ = add_prefix_space
UpperCamelCase__ = True
if state.get("""trim_offsets""" , __lowerCAmelCase ) != trim_offsets:
UpperCamelCase__ = trim_offsets
UpperCamelCase__ = True
if changes_to_apply:
UpperCamelCase__ = getattr(__lowerCAmelCase , state.pop("""type""" ) )
UpperCamelCase__ = component_class(**__lowerCAmelCase )
setattr(self.backend_tokenizer , __lowerCAmelCase , __lowerCAmelCase )
@property
def _lowerCamelCase ( self ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _lowerCamelCase ( self , __lowerCAmelCase ):
UpperCamelCase__ = AddedToken(__lowerCAmelCase , lstrip=__lowerCAmelCase , rstrip=__lowerCAmelCase ) if isinstance(__lowerCAmelCase , __lowerCAmelCase ) else value
UpperCamelCase__ = value
def _lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
UpperCamelCase__ = kwargs.get("""is_split_into_words""" , __lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCamelCase ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
UpperCamelCase__ = kwargs.get("""is_split_into_words""" , __lowerCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__lowerCAmelCase , **__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
UpperCamelCase__ = self._tokenizer.model.save(__lowerCAmelCase , name=__lowerCAmelCase )
return tuple(__lowerCAmelCase )
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=None ):
UpperCamelCase__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
UpperCamelCase__ = [self.sep_token_id]
UpperCamelCase__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
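# Hedged usage sketch of the fast tokenizer defined above (upstream it is exposed as
# RobertaTokenizerFast; the hub id and the exact ids are illustrative):
#
#   tok = RobertaTokenizerFast.from_pretrained("roberta-base")
#   enc = tok("Hello world")
#   enc["input_ids"]    # wrapped as <s> ... </s>, matching the special-token logic above
#   # token type ids come back as all zeros for RoBERTa, as the final method above returns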
| 619
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__a = {"""configuration_unispeech""": ["""UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP""", """UniSpeechConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"""UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""UniSpeechForCTC""",
"""UniSpeechForPreTraining""",
"""UniSpeechForSequenceClassification""",
"""UniSpeechModel""",
"""UniSpeechPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 689
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class UpperCamelCase__( lowerCAmelCase__ ):
"""simple docstring"""
_A = "Wav2Vec2FeatureExtractor"
_A = "AutoTokenizer"
def __init__( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
"""simple docstring"""
super().__init__(snake_case__ , snake_case__ )
A =self.feature_extractor
A =False
@classmethod
def _a ( cls : List[str] , snake_case__ : Union[str, Any] , **snake_case__ : Dict ):
"""simple docstring"""
try:
return super().from_pretrained(snake_case__ , **snake_case__ )
except OSError:
warnings.warn(
f'''Loading a tokenizer inside {cls.__name__} from a config that does not'''
" include a `tokenizer_class` attribute is deprecated and will be "
"removed in v5. Please add `'tokenizer_class': 'Wav2Vec2CTCTokenizer'`"
" attribute to either your `config.json` or `tokenizer_config.json` "
"file to suppress this warning: " , snake_case__ , )
A =WavaVecaFeatureExtractor.from_pretrained(snake_case__ , **snake_case__ )
A =WavaVecaCTCTokenizer.from_pretrained(snake_case__ , **snake_case__ )
return cls(feature_extractor=snake_case__ , tokenizer=snake_case__ )
def __call__( self : Optional[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[int] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor(*snake_case__ , **snake_case__ )
if "raw_speech" in kwargs:
warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
A =kwargs.pop("raw_speech" )
else:
A =kwargs.pop("audio" , snake_case__ )
A =kwargs.pop("sampling_rate" , snake_case__ )
A =kwargs.pop("text" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if audio is None and text is None:
raise ValueError("You need to specify either an `audio` or `text` input to process." )
if audio is not None:
A =self.feature_extractor(snake_case__ , *snake_case__ , sampling_rate=snake_case__ , **snake_case__ )
if text is not None:
A =self.tokenizer(snake_case__ , **snake_case__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A =encodings["input_ids"]
return inputs
def _a ( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Union[str, Any] ):
"""simple docstring"""
if self._in_target_context_manager:
return self.current_processor.pad(*snake_case__ , **snake_case__ )
A =kwargs.pop("input_features" , snake_case__ )
A =kwargs.pop("labels" , snake_case__ )
if len(snake_case__ ) > 0:
A =args[0]
A =args[1:]
if input_features is not None:
A =self.feature_extractor.pad(snake_case__ , *snake_case__ , **snake_case__ )
if labels is not None:
A =self.tokenizer.pad(snake_case__ , **snake_case__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A =labels["input_ids"]
return input_features
def _a ( self : List[str] , *snake_case__ : Dict , **snake_case__ : int ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case__ , **snake_case__ )
def _a ( self : List[str] , *snake_case__ : Optional[int] , **snake_case__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case__ , **snake_case__ )
@contextmanager
def _a ( self : int ):
"""simple docstring"""
warnings.warn(
"`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
"labels by using the argument `text` of the regular `__call__` method (either in the same call as "
"your audio inputs, or in a separate call." )
A =True
A =self.tokenizer
yield
A =self.feature_extractor
A =False
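# Hedged usage sketch of the processor above (upstream this pattern is Wav2Vec2Processor; the
# checkpoint id and the `waveform` variable are illustrative assumptions):
#
#   processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
#   inputs = processor(audio=waveform, sampling_rate=16_000, return_tensors="pt")
#   labels = processor(text="HELLO WORLD", return_tensors="pt").input_ids
#
# Passing both `audio` and `text` in a single call follows the combined branch of __call__ above,
# which attaches the tokenized text to the feature-extractor output before returning it.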
| 689
| 1
|
def UpperCAmelCase_ (_lowerCAmelCase : str , _lowerCAmelCase : str = " " ):
__UpperCamelCase : List[Any] = []
__UpperCamelCase : Union[str, Any] = 0
for index, char in enumerate(_lowerCAmelCase ):
if char == separator:
split_words.append(string[last_index:index] )
__UpperCamelCase : Any = index + 1
elif index + 1 == len(_lowerCAmelCase ):
split_words.append(string[last_index : index + 1] )
return split_words
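# Quick hedged examples of the splitter above (pure Python, no side effects):
#   "apple#banana#cherry" split on "#"  ->  ["apple", "banana", "cherry"]
#   "Hello there"         split on " "  ->  ["Hello", "there"]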
if __name__ == "__main__":
from doctest import testmod
testmod()
| 327
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def UpperCAmelCase_ ():
__UpperCamelCase : Any = ArgumentParser("Accelerate CLI tool" , usage="accelerate <command> [<args>]" , allow_abbrev=_lowerCAmelCase )
__UpperCamelCase : Optional[Any] = parser.add_subparsers(help="accelerate command helpers" )
# Register commands
get_config_parser(subparsers=_lowerCAmelCase )
env_command_parser(subparsers=_lowerCAmelCase )
launch_command_parser(subparsers=_lowerCAmelCase )
tpu_command_parser(subparsers=_lowerCAmelCase )
test_command_parser(subparsers=_lowerCAmelCase )
# Let's go
__UpperCamelCase : int = parser.parse_args()
if not hasattr(_lowerCAmelCase , "func" ):
parser.print_help()
exit(1 )
# Run
args.func(_lowerCAmelCase )
if __name__ == "__main__":
main()
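# Hedged examples of invoking the CLI assembled above (subcommand names follow the parsers
# registered a few lines up; the training script name is a placeholder):
#
#   accelerate config                      # interactive configuration
#   accelerate env                         # print environment info
#   accelerate launch train.py --arg value
#   accelerate test                        # sanity-check the current config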
| 327
| 1
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
a : List[str] = StableDiffusionDiffEditPipeline
a : Dict = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""height""", """width""", """image"""} | {"""image_latents"""}
a : int = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"""image"""} | {"""image_latents"""}
a : Tuple = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a : List[Any] = frozenset([] )
def UpperCAmelCase_ ( self ) -> List[Any]:
torch.manual_seed(0 )
__lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A__ , )
__lowerCAmelCase = DDIMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=A__ , set_alpha_to_one=A__ , )
__lowerCAmelCase = DDIMInverseScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="scaled_linear" , clip_sample=A__ , set_alpha_to_zero=A__ , )
torch.manual_seed(0 )
__lowerCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
__lowerCAmelCase = CLIPTextModel(A__ )
__lowerCAmelCase = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__lowerCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> Tuple:
__lowerCAmelCase = floats_tensor((1, 16, 16) , rng=random.Random(A__ ) ).to(A__ )
__lowerCAmelCase = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A__ ) ).to(A__ )
if str(A__ ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(A__ )
else:
__lowerCAmelCase = torch.Generator(device=A__ ).manual_seed(A__ )
__lowerCAmelCase = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> int:
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(A__ ) ).convert("RGB" )
if str(A__ ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(A__ )
else:
__lowerCAmelCase = torch.Generator(device=A__ ).manual_seed(A__ )
__lowerCAmelCase = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self , UpperCamelCase , UpperCamelCase=0 ) -> str:
__lowerCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(A__ ) ).to(A__ )
__lowerCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__lowerCAmelCase = Image.fromarray(np.uinta(A__ ) ).convert("RGB" )
if str(A__ ).startswith("mps" ):
__lowerCAmelCase = torch.manual_seed(A__ )
else:
__lowerCAmelCase = torch.Generator(device=A__ ).manual_seed(A__ )
__lowerCAmelCase = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase_ ( self ) -> str:
if not hasattr(self.pipeline_class , "_optional_components" ):
return
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A__ , A__ , A__ )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__lowerCAmelCase = self.get_dummy_inputs(A__ )
__lowerCAmelCase = pipe(**A__ )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A__ )
__lowerCAmelCase = self.pipeline_class.from_pretrained(A__ )
pipe_loaded.to(A__ )
pipe_loaded.set_progress_bar_config(disable=A__ )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A__ , A__ ) is None , F'''`{optional_component}` did not stay set to None after loading.''' , )
__lowerCAmelCase = self.get_dummy_inputs(A__ )
__lowerCAmelCase = pipe_loaded(**A__ )[0]
__lowerCAmelCase = np.abs(output - output_loaded ).max()
self.assertLess(A__ , 1E-4 )
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
__lowerCAmelCase = self.get_dummy_mask_inputs(A__ )
__lowerCAmelCase = pipe.generate_mask(**A__ )
__lowerCAmelCase = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__lowerCAmelCase = np.array([0] * 9 )
__lowerCAmelCase = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A__ , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def UpperCAmelCase_ ( self ) -> Any:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
__lowerCAmelCase = self.get_dummy_inversion_inputs(A__ )
__lowerCAmelCase = pipe.invert(**A__ ).images
__lowerCAmelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__lowerCAmelCase = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
__lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A__ , 1E-3 )
def UpperCAmelCase_ ( self ) -> int:
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def UpperCAmelCase_ ( self ) -> int:
__lowerCAmelCase = "cpu"
__lowerCAmelCase = self.get_dummy_components()
__lowerCAmelCase = {"beta_start": 0.0_00_85, "beta_end": 0.0_12, "beta_schedule": "scaled_linear"}
__lowerCAmelCase = DPMSolverMultistepScheduler(**A__ )
__lowerCAmelCase = DPMSolverMultistepInverseScheduler(**A__ )
__lowerCAmelCase = self.pipeline_class(**A__ )
pipe.to(A__ )
pipe.set_progress_bar_config(disable=A__ )
__lowerCAmelCase = self.get_dummy_inversion_inputs(A__ )
__lowerCAmelCase = pipe.invert(**A__ ).images
__lowerCAmelCase = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__lowerCAmelCase = np.array(
[0.51_50, 0.51_34, 0.50_43, 0.53_76, 0.46_94, 0.5_10_50, 0.50_15, 0.44_07, 0.47_99] , )
__lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A__ , 1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase__ ( unittest.TestCase ):
def UpperCAmelCase_ ( self ) -> List[str]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def UpperCAmelCase_ ( cls ) -> Any:
__lowerCAmelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png" )
__lowerCAmelCase = raw_image.convert("RGB" ).resize((768, 768) )
__lowerCAmelCase = raw_image
def UpperCAmelCase_ ( self ) -> Optional[Any]:
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=A__ , torch_dtype=torch.floataa )
__lowerCAmelCase = DDIMScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A__ )
__lowerCAmelCase = "a bowl of fruit"
__lowerCAmelCase = "a bowl of pears"
__lowerCAmelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=A__ , target_prompt=A__ , generator=A__ , )
__lowerCAmelCase = pipe.invert(
prompt=A__ , image=self.raw_image , inpaint_strength=0.7 , generator=A__ ).latents
__lowerCAmelCase = pipe(
prompt=A__ , mask_image=A__ , image_latents=A__ , generator=A__ , negative_prompt=A__ , inpaint_strength=0.7 , output_type="numpy" , ).images[0]
__lowerCAmelCase = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def UpperCAmelCase_ ( self ) -> List[Any]:
__lowerCAmelCase = torch.manual_seed(0 )
__lowerCAmelCase = StableDiffusionDiffEditPipeline.from_pretrained(
"stabilityai/stable-diffusion-2-1" , safety_checker=A__ , torch_dtype=torch.floataa )
__lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__lowerCAmelCase = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A__ )
__lowerCAmelCase = "a bowl of fruit"
__lowerCAmelCase = "a bowl of pears"
__lowerCAmelCase = pipe.generate_mask(
image=self.raw_image , source_prompt=A__ , target_prompt=A__ , generator=A__ , )
__lowerCAmelCase = pipe.invert(
prompt=A__ , image=self.raw_image , inpaint_strength=0.7 , generator=A__ , num_inference_steps=25 , ).latents
__lowerCAmelCase = pipe(
prompt=A__ , mask_image=A__ , image_latents=A__ , generator=A__ , negative_prompt=A__ , inpaint_strength=0.7 , num_inference_steps=25 , output_type="numpy" , ).images[0]
__lowerCAmelCase = (
np.array(
load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/diffedit/pears.png" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
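# Hedged sketch of the three-stage DiffEdit flow that the slow tests above exercise
# (checkpoint id, prompts, and variable names are illustrative):
#
#   pipe = StableDiffusionDiffEditPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
#   mask = pipe.generate_mask(image=img, source_prompt="a bowl of fruit",
#                             target_prompt="a bowl of pears")
#   latents = pipe.invert(prompt="a bowl of fruit", image=img, inpaint_strength=0.7).latents
#   edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents,
#                 inpaint_strength=0.7).images[0]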
| 700
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase : Union[str, Any] = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = ['''DistilBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[str] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[int] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 39
| 0
|
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_attributes.py
a__ = '''src/transformers'''
# This is to make sure the transformers module imported is the one in the repo.
a__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
a__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
a__ = {
# used to compute the property `self.chunk_length`
'''EncodecConfig''': ['''overlap'''],
# used as `self.bert_model = BertModel(config, ...)`
'''DPRConfig''': True,
    # not used in modeling files, but it's important information
'''FSMTConfig''': ['''langs'''],
# used internally in the configuration class file
'''GPTNeoConfig''': ['''attention_types'''],
# used internally in the configuration class file
'''EsmConfig''': ['''is_folding_model'''],
    # used during training (even though we don't have a training script for these models yet)
'''Mask2FormerConfig''': ['''ignore_value'''],
    # `ignore_value` used during training (even though we don't have a training script for these models yet)
    # `norm` used in the conversion script (even though it is not used in the modeling file)
'''OneFormerConfig''': ['''ignore_value''', '''norm'''],
# used during preprocessing and collation, see `collating_graphormer.py`
'''GraphormerConfig''': ['''spatial_pos_max'''],
# used internally in the configuration class file
'''T5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
'''MT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
'''UMT5Config''': ['''feed_forward_proj''', '''tokenizer_class'''],
# used internally in the configuration class file
'''LongT5Config''': ['''feed_forward_proj'''],
# used internally in the configuration class file
'''SwitchTransformersConfig''': ['''feed_forward_proj'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''BioGptConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''GLPNConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''SegformerConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''CvtConfig''': ['''layer_norm_eps'''],
# having default values other than `1e-5` - we can't fix them without breaking
'''PerceiverConfig''': ['''layer_norm_eps'''],
# used internally to calculate the feature size
'''InformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''TimeSeriesTransformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate the feature size
'''AutoformerConfig''': ['''num_static_real_features''', '''num_time_features'''],
# used internally to calculate `mlp_dim`
'''SamVisionConfig''': ['''mlp_ratio'''],
# For (head) training, but so far not implemented
'''ClapAudioConfig''': ['''num_classes'''],
# Not used, but providing useful information to users
'''SpeechT5HifiGanConfig''': ['''sampling_rate'''],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def A__ (snake_case : str , snake_case : int , snake_case : Optional[Any] , snake_case : Optional[Any] ) -> Any:
__UpperCamelCase : Any = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
__UpperCamelCase : str = True
# Deal with multi-line cases
elif (
re.search(
rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , snake_case , )
is not None
):
__UpperCamelCase : Optional[int] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__UpperCamelCase : Optional[Any] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__UpperCamelCase : Any = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
__UpperCamelCase : str = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
__UpperCamelCase : str = True
if not attribute_used:
__UpperCamelCase : Optional[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__UpperCamelCase : Optional[int] = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__UpperCamelCase : List[str] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__UpperCamelCase : Optional[Any] = True
elif attribute.endswith("""_token_id""" ):
__UpperCamelCase : int = True
# configuration class specific cases
if not case_allowed:
__UpperCamelCase : Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [] )
__UpperCamelCase : int = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def A__ (snake_case : List[Any] ) -> Dict:
__UpperCamelCase : Optional[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
__UpperCamelCase : Optional[Any] = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
__UpperCamelCase : str = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__UpperCamelCase : Dict = {}
if len(config_class.attribute_map ) > 0:
__UpperCamelCase : Dict = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__UpperCamelCase : List[Any] = inspect.getsourcefile(snake_case )
__UpperCamelCase : List[Any] = os.path.dirname(snake_case )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__UpperCamelCase : List[Any] = [os.path.join(snake_case , snake_case ) for fn in os.listdir(snake_case ) if fn.startswith("""modeling_""" )]
# Get the source code strings
__UpperCamelCase : Any = []
for path in modeling_paths:
if os.path.isfile(snake_case ):
with open(snake_case ) as fp:
modeling_sources.append(fp.read() )
__UpperCamelCase : List[str] = []
for config_param, default_value in zip(snake_case , snake_case ):
# `attributes` here is all the variant names for `config_param`
__UpperCamelCase : str = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(snake_case , snake_case , snake_case , snake_case ):
unused_attributes.append(attributes[0] )
return sorted(snake_case )
def A__ () -> Dict:
__UpperCamelCase : Dict = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__UpperCamelCase : Tuple = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) , lambda snake_case : inspect.isclass(snake_case )
and issubclass(snake_case , snake_case )
and inspect.getmodule(snake_case ) == inspect.getmodule(_config_class ) , )
]
for config_class in config_classes_in_module:
__UpperCamelCase : List[str] = check_config_attributes_being_used(snake_case )
if len(snake_case ) > 0:
__UpperCamelCase : List[Any] = unused_attributes
if len(snake_case ) > 0:
__UpperCamelCase : str = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(snake_case )
if __name__ == "__main__":
check_config_attributes()
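# Hedged illustration of what the attribute-usage check above treats as a "use" inside a
# modeling file (the attribute name is illustrative):
#
#   config.chunk_size_feed_forward
#   getattr(config, "chunk_size_feed_forward", 0)
#   getattr(self.config, "chunk_size_feed_forward", 0)
#
# Any one of these patterns appearing in any modeling_*.py of the model's folder marks the
# configuration attribute as used.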
| 279
|
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE_ ( _UpperCamelCase ):
"""simple docstring"""
__magic_name__ : Dict = 'linear'
__magic_name__ : Dict = 'cosine'
__magic_name__ : Optional[int] = 'cosine_with_restarts'
__magic_name__ : List[str] = 'polynomial'
__magic_name__ : Any = 'constant'
__magic_name__ : Union[str, Any] = 'constant_with_warmup'
__magic_name__ : str = 'piecewise_constant'
def A__ (snake_case : Optimizer , snake_case : int = -1 ) -> Optional[Any]:
return LambdaLR(snake_case , lambda snake_case : 1 , last_epoch=snake_case )
def A__ (snake_case : Optimizer , snake_case : int , snake_case : int = -1 ) -> List[Any]:
def lr_lambda(snake_case : int ):
if current_step < num_warmup_steps:
return float(snake_case ) / float(max(1.0 , snake_case ) )
return 1.0
return LambdaLR(snake_case , snake_case , last_epoch=snake_case )
def A__ (snake_case : Optimizer , snake_case : str , snake_case : int = -1 ) -> Union[str, Any]:
__UpperCamelCase : Optional[Any] = {}
__UpperCamelCase : int = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__UpperCamelCase , __UpperCamelCase : Tuple = rule_str.split(""":""" )
__UpperCamelCase : int = int(snake_case )
__UpperCamelCase : Union[str, Any] = float(snake_case )
__UpperCamelCase : Optional[int] = value
__UpperCamelCase : Dict = float(rule_list[-1] )
def create_rules_function(snake_case : List[str] , snake_case : Any ):
def rule_func(snake_case : int ) -> float:
__UpperCamelCase : Any = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__UpperCamelCase : Tuple = create_rules_function(snake_case , snake_case )
return LambdaLR(snake_case , snake_case , last_epoch=snake_case )
def A__ (snake_case : int , snake_case : Optional[Any] , snake_case : Union[str, Any] , snake_case : str=-1 ) -> str:
def lr_lambda(snake_case : int ):
if current_step < num_warmup_steps:
return float(snake_case ) / float(max(1 , snake_case ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(snake_case , snake_case , snake_case )
def A__ (snake_case : Optimizer , snake_case : int , snake_case : int , snake_case : float = 0.5 , snake_case : int = -1 ) -> List[str]:
def lr_lambda(snake_case : Dict ):
if current_step < num_warmup_steps:
return float(snake_case ) / float(max(1 , snake_case ) )
__UpperCamelCase : Dict = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(snake_case ) * 2.0 * progress )) )
return LambdaLR(snake_case , snake_case , snake_case )
def A__ (snake_case : Optimizer , snake_case : int , snake_case : int , snake_case : int = 1 , snake_case : int = -1 ) -> Tuple:
def lr_lambda(snake_case : Optional[int] ):
if current_step < num_warmup_steps:
return float(snake_case ) / float(max(1 , snake_case ) )
__UpperCamelCase : List[str] = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(snake_case ) * progress) % 1.0) )) )
return LambdaLR(snake_case , snake_case , snake_case )
def A__ (snake_case : List[str] , snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : str=1e-7 , snake_case : List[str]=1.0 , snake_case : Dict=-1 ) -> Tuple:
__UpperCamelCase : Tuple = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
        raise ValueError(F'''lr_end ({lr_end}) must be smaller than initial lr ({lr_init})''' )
def lr_lambda(snake_case : int ):
if current_step < num_warmup_steps:
return float(snake_case ) / float(max(1 , snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__UpperCamelCase : List[str] = lr_init - lr_end
__UpperCamelCase : Any = num_training_steps - num_warmup_steps
__UpperCamelCase : List[str] = 1 - (current_step - num_warmup_steps) / decay_steps
__UpperCamelCase : List[Any] = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(snake_case , snake_case , snake_case )
a__ = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def A__ (snake_case : Union[str, SchedulerType] , snake_case : Optimizer , snake_case : Optional[str] = None , snake_case : Optional[int] = None , snake_case : Optional[int] = None , snake_case : int = 1 , snake_case : float = 1.0 , snake_case : int = -1 , ) -> Dict:
__UpperCamelCase : List[str] = SchedulerType(snake_case )
__UpperCamelCase : int = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(snake_case , last_epoch=snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(snake_case , step_rules=snake_case , last_epoch=snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(F'''{name} requires `num_warmup_steps`, please provide that argument.''' )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(snake_case , num_warmup_steps=snake_case , last_epoch=snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(F'''{name} requires `num_training_steps`, please provide that argument.''' )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
snake_case , num_warmup_steps=snake_case , num_training_steps=snake_case , num_cycles=snake_case , last_epoch=snake_case , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
snake_case , num_warmup_steps=snake_case , num_training_steps=snake_case , power=snake_case , last_epoch=snake_case , )
return schedule_func(
snake_case , num_warmup_steps=snake_case , num_training_steps=snake_case , last_epoch=snake_case )
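# Hedged usage sketch of the dispatcher above (upstream it is exposed as `get_scheduler`;
# `optimizer` is assumed to be an already-constructed torch optimizer):
#
#   scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1_000)
#   for step in range(1_000):
#       ...                      # forward/backward + optimizer.step()
#       scheduler.step()         # advance the warmup/decay schedule once per update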
| 279
| 1
|
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> Any:
lowerCamelCase__ : Union[str, Any] = BigBirdConfig.from_json_file(a__ )
print(F"""Building PyTorch model from configuration: {config}""" )
if is_trivia_qa:
lowerCamelCase__ : str = BigBirdForQuestionAnswering(a__ )
else:
lowerCamelCase__ : List[Any] = BigBirdForPreTraining(a__ )
# Load weights from tf checkpoint
load_tf_weights_in_big_bird(a__ , a__ , is_trivia_qa=a__ )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(a__ )
if __name__ == "__main__":
_UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--big_bird_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_trivia_qa""", action="""store_true""", help="""Whether to convert a model with a trivia_qa head."""
)
_UpperCAmelCase : List[str] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
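# Hedged example invocation of this conversion script (script name and paths are placeholders):
#
#   python convert_bigbird_tf_checkpoint.py \
#       --tf_checkpoint_path ./bigbird_ckpt \
#       --big_bird_config_file ./bigbird_ckpt/config.json \
#       --pytorch_dump_path ./bigbird-pytorch \
#       --is_trivia_qa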
| 716
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_UpperCAmelCase : Optional[Any] = {
"""configuration_clip""": [
"""CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""CLIPConfig""",
"""CLIPOnnxConfig""",
"""CLIPTextConfig""",
"""CLIPVisionConfig""",
],
"""processing_clip""": ["""CLIPProcessor"""],
"""tokenization_clip""": ["""CLIPTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = ["""CLIPTokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = ["""CLIPFeatureExtractor"""]
_UpperCAmelCase : Union[str, Any] = ["""CLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
"""CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""CLIPModel""",
"""CLIPPreTrainedModel""",
"""CLIPTextModel""",
"""CLIPTextModelWithProjection""",
"""CLIPVisionModel""",
"""CLIPVisionModelWithProjection""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
"""TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFCLIPModel""",
"""TFCLIPPreTrainedModel""",
"""TFCLIPTextModel""",
"""TFCLIPVisionModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
"""FlaxCLIPModel""",
"""FlaxCLIPPreTrainedModel""",
"""FlaxCLIPTextModel""",
"""FlaxCLIPTextPreTrainedModel""",
"""FlaxCLIPVisionModel""",
"""FlaxCLIPVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
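This is the standard lazy-import pattern of a transformers `__init__.py`: heavy submodules are only imported on first attribute access. A minimal consumer-side sketch (the public openai/clip-vit-base-patch32 checkpoint is used purely as an illustration):

from transformers import CLIPModel, CLIPProcessor  # resolved lazily via _LazyModule

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")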
| 188
| 0
|
"""simple docstring"""
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
__SCREAMING_SNAKE_CASE = [
"""word_embeddings_layernorm.weight""",
"""word_embeddings_layernorm.bias""",
"""input_layernorm.weight""",
"""input_layernorm.bias""",
"""post_attention_layernorm.weight""",
"""post_attention_layernorm.bias""",
"""self_attention.dense.bias""",
"""mlp.dense_4h_to_h.bias""",
"""ln_f.weight""",
"""ln_f.bias""",
]
__SCREAMING_SNAKE_CASE = [
"""mlp.dense_4h_to_h.weight""",
"""self_attention.dense.weight""",
]
def layer_name_mapping(key, file):
    """Map an original Megatron-DeepSpeed key to the corresponding transformers key."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }
    if key in layer_rename_map:
        return layer_rename_map[key]
    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    """Return the size in bytes of one element of the given torch dtype."""
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def __a ( a, a, a, a, a ):
"""simple docstring"""
if bloom_config_file == "":
_a = BloomConfig()
else:
_a = BloomConfig.from_json_file(a )
if shard_model:
_a = os.listdir(a )
_a = sorted(filter(lambda a : s.startswith("layer" ) and "model_00" in s, a ) )
_a = {"weight_map": {}, "metadata": {}}
_a = 0
_a = None
_a = BloomConfig()
for j, file in enumerate(a ):
print("Processing file: {}".format(a ) )
_a = None
for i in range(a ):
# load all TP files
_a = file.replace("model_00", F'model_0{i}' )
_a = torch.load(os.path.join(a, a ), map_location="cpu" )
# Rename keys in the transformers names
_a = list(temp.keys() )
for key in keys:
_a = temp.pop(a )
if tensors is None:
_a = temp
else:
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_a = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_a = torch.cat([tensors[key], temp[key]], dim=a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_a = tensors[key] / pretraining_tp
torch.save(
a, os.path.join(
a, "pytorch_model_{}-of-{}.bin".format(str(j + 1 ).zfill(5 ), str(len(a ) ).zfill(5 ) ), ), )
for key in tensors.keys():
_a = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
_a = "pytorch_model_{}-of-{}.bin".format(
str(j + 1 ).zfill(5 ), str(len(a ) ).zfill(5 ) )
_a = BloomConfig()
_a = pytorch_dump_folder_path + "/" + CONFIG_NAME
_a = total_size
with open(a, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(a, WEIGHTS_NAME + ".index.json" ), "w", encoding="utf-8" ) as f:
_a = json.dumps(a, indent=2, sort_keys=a ) + "\n"
f.write(a )
else:
_a = BloomModel(a )
_a = os.listdir(a )
_a = sorted(filter(lambda a : s.startswith("layer" ) and "model_00" in s, a ) )
_a = None
for i, file in enumerate(a ):
_a = None
for i in range(a ):
# load all TP files
_a = file.replace("model_00", F'model_0{i}' )
_a = torch.load(os.path.join(a, a ), map_location="cpu" )
# Rename keys in the transformers names
_a = list(temp.keys() )
for key in keys:
_a = temp.pop(a )
if tensors is None:
_a = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
_a = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
_a = torch.cat([tensors[key], temp[key]], dim=a )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(a ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
_a = tensors[key] / pretraining_tp
_a = model.load_state_dict(a, strict=a )
assert not other_keys.unexpected_keys, F'The keys {other_keys.unexpected_keys} are unexpected'
if missing_keys is None:
_a = set(other_keys.missing_keys )
else:
_a = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F'The keys {missing_keys} are missing'
# Save pytorch-model
os.makedirs(a, exist_ok=a )
_a = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
_a = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}' )
if config.torch_dtype is not None:
_a = model.to(config.torch_dtype )
torch.save(model.state_dict(), a )
print(F'Save configuration file to {pytorch_config_dump_path}' )
with open(a, "w", encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--bloom_checkpoint_path""",
default=None,
type=str,
required=True,
help="""Path to the Megatron-LM checkpoint path.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--bloom_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--shard_model""",
action="""store_true""",
help="""An optional setting to shard the output model \nThis enables sharding the converted checkpoint""",
)
parser.add_argument(
"""--pretraining_tp""",
default=4,
type=int,
help="""Pretraining TP rank that has been used when training the model in Megatron-LM \n""",
)
__SCREAMING_SNAKE_CASE = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
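A hypothetical direct invocation of the main conversion function, mirroring the argument order of the argparse-driven call above (paths are placeholders):

convert_bloom_checkpoint_to_pytorch(
    "/path/to/megatron_checkpoints",  # bloom_checkpoint_path
    "",                               # bloom_config_file ("" falls back to a default BloomConfig)
    "/path/to/output",                # pytorch_dump_folder_path
    True,                             # shard_model
    4,                                # pretraining_tp
)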
| 388
|
"""simple docstring"""
from math import ceil
def assert_device_map(device_map, num_blocks):
    """Validate that a device_map covers every attention block exactly once."""
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
            " These attention blocks were specified more than once: " + str(duplicate_blocks)
        )
    if len(missing_blocks) != 0:
        raise ValueError(
            "There are attention blocks for this model that are not specified in the device_map. Add these attention "
            "blocks to a device on the device_map: " + str(missing_blocks)
        )
    if len(extra_blocks) != 0:
        raise ValueError(
            "The device_map contains more attention blocks than this model has. Remove these from the device_map:"
            + str(extra_blocks)
        )


def get_device_map(n_layers, devices):
    """Return a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list))
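A quick illustration of the two helpers above (device ids are arbitrary):

# 12 layers spread over 4 devices -> 3 layers per device
device_map = get_device_map(n_layers=12, devices=[0, 1, 2, 3])
# {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # raises if blocks are missing or duplicated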
| 388
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
A__: Optional[int] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[int] ) -> Any:
# initialize config
if "resnet-50" in model_name:
_a : Tuple =ResNetConfig.from_pretrained("""microsoft/resnet-50""" )
elif "resnet-101" in model_name:
_a : Dict =ResNetConfig.from_pretrained("""microsoft/resnet-101""" )
else:
raise ValueError("""Model name should include either resnet50 or resnet101""" )
_a : Dict =DetrConfig(use_timm_backbone=_lowerCamelCase ,backbone_config=_lowerCamelCase )
# set label attributes
_a : Any ="""panoptic""" in model_name
if is_panoptic:
_a : Tuple =250
else:
_a : Tuple =91
_a : int ="""huggingface/label-files"""
_a : Optional[int] ="""coco-detection-id2label.json"""
_a : str =json.load(open(hf_hub_download(_lowerCamelCase ,_lowerCamelCase ,repo_type="""dataset""" ) ,"""r""" ) )
_a : Union[str, Any] ={int(_lowerCamelCase ): v for k, v in idalabel.items()}
_a : List[str] =idalabel
_a : Optional[int] ={v: k for k, v in idalabel.items()}
return config, is_panoptic
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Any ) -> Optional[int]:
# here we list all keys to be renamed (original name on the left, our name on the right)
_a : List[str] =[]
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.conv1.weight""", """backbone.conv_encoder.model.embedder.embedder.convolution.weight""") )
rename_keys.append(("""backbone.0.body.bn1.weight""", """backbone.conv_encoder.model.embedder.embedder.normalization.weight""") )
rename_keys.append(("""backbone.0.body.bn1.bias""", """backbone.conv_encoder.model.embedder.embedder.normalization.bias""") )
rename_keys.append(("""backbone.0.body.bn1.running_mean""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_mean""") )
rename_keys.append(("""backbone.0.body.bn1.running_var""", """backbone.conv_encoder.model.embedder.embedder.normalization.running_var""") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean",
) )
rename_keys.append(
(
F"backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var",
F"backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
F"transformer.encoder.layers.{i}.self_attn.out_proj.weight",
F"encoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
F"transformer.decoder.layers.{i}.self_attn.out_proj.weight",
F"decoder.layers.{i}.self_attn.out_proj.weight",
) )
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias") )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
) )
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
) )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias") )
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight") )
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
] )
return rename_keys
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : List[Any] ) -> int:
_a : Any =state_dict.pop(_lowerCamelCase )
_a : Any =val
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : int ,_UpperCAmelCase : Optional[Any]=False ) -> Tuple:
_a : Any =""""""
if is_panoptic:
_a : Dict ="""detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_a : Optional[int] =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight" )
_a : Optional[int] =state_dict.pop(F"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_a : List[str] =in_proj_weight[:256, :]
_a : List[Any] =in_proj_bias[:256]
_a : List[str] =in_proj_weight[256:512, :]
_a : Optional[int] =in_proj_bias[256:512]
_a : str =in_proj_weight[-256:, :]
_a : Optional[int] =in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
_a : Optional[Any] =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight" )
_a : Optional[int] =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias" )
# next, add query, keys and values (in that order) to the state dict
_a : List[str] =in_proj_weight[:256, :]
_a : Union[str, Any] =in_proj_bias[:256]
_a : List[str] =in_proj_weight[256:512, :]
_a : Any =in_proj_bias[256:512]
_a : Any =in_proj_weight[-256:, :]
_a : Tuple =in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
_a : List[Any] =state_dict.pop(
F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight" )
_a : int =state_dict.pop(F"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
_a : List[Any] =in_proj_weight_cross_attn[:256, :]
_a : Dict =in_proj_bias_cross_attn[:256]
_a : Union[str, Any] =in_proj_weight_cross_attn[256:512, :]
_a : Tuple =in_proj_bias_cross_attn[256:512]
_a : Any =in_proj_weight_cross_attn[-256:, :]
_a : Tuple =in_proj_bias_cross_attn[-256:]
def SCREAMING_SNAKE_CASE_ ( ) -> Union[str, Any]:
_a : str ="""http://images.cocodataset.org/val2017/000000039769.jpg"""
_a : Union[str, Any] =Image.open(requests.get(_lowerCamelCase ,stream=_lowerCamelCase ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( _UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : Optional[int]=None ,_UpperCAmelCase : Dict=False ) -> Optional[Any]:
_a , _a : int =get_detr_config(_lowerCamelCase )
# load original model from torch hub
_a : str ={
"""detr-resnet-50""": """detr_resnet50""",
"""detr-resnet-101""": """detr_resnet101""",
}
logger.info(F"Converting model {model_name}..." )
_a : Any =torch.hub.load("""facebookresearch/detr""" ,model_name_to_original_name[model_name] ,pretrained=_lowerCamelCase ).eval()
_a : int =detr.state_dict()
# rename keys
for src, dest in create_rename_keys(_lowerCamelCase ):
if is_panoptic:
_a : Any ="""detr.""" + src
rename_key(_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(_lowerCamelCase ,is_panoptic=_lowerCamelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_a : int ="""detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_a : str =state_dict.pop(_lowerCamelCase )
_a : Optional[int] =val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_a : Tuple =state_dict.pop(_lowerCamelCase )
_a : str =val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_a : Optional[int] =state_dict.pop(_lowerCamelCase )
_a : str =val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_a : int =state_dict.pop(_lowerCamelCase )
_a : Optional[Any] =val
# finally, create HuggingFace model and load state dict
_a : str =DetrForSegmentation(_lowerCamelCase ) if is_panoptic else DetrForObjectDetection(_lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
model.eval()
# verify our conversion on an image
_a : Any ="""coco_panoptic""" if is_panoptic else """coco_detection"""
_a : Union[str, Any] =DetrImageProcessor(format=_lowerCamelCase )
_a : List[Any] =processor(images=prepare_img() ,return_tensors="""pt""" )
_a : Any =encoding["""pixel_values"""]
_a : Tuple =detr(_lowerCamelCase )
_a : int =model(_lowerCamelCase )
assert torch.allclose(outputs.logits ,original_outputs["""pred_logits"""] ,atol=1e-3 )
assert torch.allclose(outputs.pred_boxes ,original_outputs["""pred_boxes"""] ,atol=1e-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks ,original_outputs["""pred_masks"""] ,atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("""Uploading PyTorch model and image processor to the hub...""" )
model.push_to_hub(F"nielsr/{model_name}" )
processor.push_to_hub(F"nielsr/{model_name}" )
if __name__ == "__main__":
A__: Dict = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''detr-resnet-50''',
type=str,
choices=['''detr-resnet-50''', '''detr-resnet-101'''],
help='''Name of the DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub or not.''')
A__: Optional[Any] = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
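Mirroring the call at the bottom of the script, a hypothetical direct invocation (the model name comes from the argparse choices; the output path is a placeholder):

convert_detr_checkpoint("detr-resnet-50", "./detr-resnet-50", False)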
| 700
|
'''simple docstring'''
from functools import lru_cache
def unique_prime_factors(n: int) -> set:
    """Find the set of unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check the equality of all elements in an iterable."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first run of n consecutive integers with n unique prime factors each."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]

        # Run elements through our unique_prime_factors function
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)

        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group

        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group of n consecutive integers with n unique prime factors."""
    results = run(n)
    return results[0] if len(results) else None
if __name__ == "__main__":
print(solution())
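A few illustrative checks of the helpers above (644, 645, 646 is the classic three-factor triple quoted in the Project Euler 47 statement):

# 644 = 2^2 * 7 * 23, so it has exactly three unique prime factors.
assert unique_prime_factors(644) == {2, 7, 23}
assert upf_len(644) == 3
print(solution(3))  # 644, the first of three consecutive integers with three distinct prime factors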
| 506
| 0
|
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
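The replacement import that the deprecation message above points to:

# New-style import; the old pipelines.stable_diffusion path only re-exports it.
from diffusers import FlaxStableDiffusionControlNetPipeline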
| 382
|
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    """
    Apply Ohm's law: exactly one of voltage, current, resistance must be 0,
    and the missing quantity is returned as a single-entry dict.
    """
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    elif current == 0:
        return {"current": voltage / resistance}
    elif resistance == 0:
        return {"resistance": voltage / current}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
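Example of the single-entry-dict convention (ohms_law is the descriptive name used for the restored function above):

print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
print(ohms_law(voltage=0, current=2, resistance=3))   # {'voltage': 6.0}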
| 393
| 0
|
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - import torch at initialization

    def _consolidate(self, column):
        import torch

        # Stack a list of same-shape, same-dtype tensors into a single tensor.
        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
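In practice this formatter is not instantiated directly; it is selected through the `datasets` format API. A minimal sketch, assuming a small in-memory dataset:

from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]], "y": [0, 1]})
ds = ds.with_format("torch")  # routes rows/columns/batches through the torch formatter
print(ds[0])                  # {'x': tensor([1, 2]), 'y': tensor(0)}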
| 187
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
lowerCAmelCase__ = StableDiffusionDiffEditPipeline
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
lowerCAmelCase__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
lowerCAmelCase__ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase__ = frozenset([] )
    def get_dummy_components(self):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__lowerCamelCase , )
lowerCamelCase__ = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
lowerCamelCase__ = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__lowerCamelCase , set_alpha_to_zero=__lowerCamelCase , )
torch.manual_seed(0 )
lowerCamelCase__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
lowerCamelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
lowerCamelCase__ = CLIPTextModel(__lowerCamelCase )
lowerCamelCase__ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
lowerCamelCase__ = {
"unet": unet,
"scheduler": scheduler,
"inverse_scheduler": inverse_scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "a dog and a newt",
"mask_image": mask,
"image_latents": latents,
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image": image,
"source_prompt": "a cat and a frog",
"target_prompt": "a dog and a newt",
"generator": generator,
"num_inference_steps": 2,
"num_maps_per_mask": 2,
"mask_encode_strength": 1.0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"image": image,
"prompt": "a cat and a frog",
"generator": generator,
"num_inference_steps": 2,
"inpaint_strength": 1.0,
"guidance_scale": 6.0,
"decode_latents": True,
"output_type": "numpy",
}
return inputs
    def test_save_load_optional_components(self):
'''simple docstring'''
if not hasattr(self.pipeline_class , "_optional_components" ):
return
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
lowerCamelCase__ = self.get_dummy_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe(**__lowerCamelCase )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__lowerCamelCase )
lowerCamelCase__ = self.pipeline_class.from_pretrained(__lowerCamelCase )
pipe_loaded.to(__lowerCamelCase )
pipe_loaded.set_progress_bar_config(disable=__lowerCamelCase )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__lowerCamelCase , __lowerCamelCase ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
lowerCamelCase__ = self.get_dummy_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe_loaded(**__lowerCamelCase )[0]
lowerCamelCase__ = np.abs(output - output_loaded ).max()
self.assertLess(__lowerCamelCase , 1E-4 )
    def test_mask(self):
'''simple docstring'''
lowerCamelCase__ = "cpu"
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = self.get_dummy_mask_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe.generate_mask(**__lowerCamelCase )
lowerCamelCase__ = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
lowerCamelCase__ = np.array([0] * 9 )
lowerCamelCase__ = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
    def test_inversion(self):
'''simple docstring'''
lowerCamelCase__ = "cpu"
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe.invert(**__lowerCamelCase ).images
lowerCamelCase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase__ = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowerCamelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
    def test_inference_batch_single_identical(self):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
    def test_inversion_dpm(self):
'''simple docstring'''
lowerCamelCase__ = "cpu"
lowerCamelCase__ = self.get_dummy_components()
lowerCamelCase__ = {"beta_start": 0.0_0_0_8_5, "beta_end": 0.0_1_2, "beta_schedule": "scaled_linear"}
lowerCamelCase__ = DPMSolverMultistepScheduler(**__lowerCamelCase )
lowerCamelCase__ = DPMSolverMultistepInverseScheduler(**__lowerCamelCase )
lowerCamelCase__ = self.pipeline_class(**__lowerCamelCase )
pipe.to(__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
lowerCamelCase__ = self.get_dummy_inversion_inputs(__lowerCamelCase )
lowerCamelCase__ = pipe.invert(**__lowerCamelCase ).images
lowerCamelCase__ = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
lowerCamelCase__ = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
lowerCamelCase__ = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__lowerCamelCase , 1E-3 )
@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image
    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)
        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)
        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"
        mask_image = pipe.generate_mask(
            image=self.raw_image, source_prompt=source_prompt, target_prompt=target_prompt, generator=generator
        )
        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator, num_inference_steps=25
        ).latents
        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]
        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
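Condensed from the integration tests above, the three-step DiffEdit flow (mask, inversion, edit); the checkpoint, image URL, and prompts are the ones used in the tests:

import torch
from diffusers import StableDiffusionDiffEditPipeline, DDIMScheduler, DDIMInverseScheduler
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

mask_image = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
inv_latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
edited = pipe(
    prompt="a bowl of pears",
    mask_image=mask_image,
    image_latents=inv_latents,
    negative_prompt="a bowl of fruit",
    inpaint_strength=0.7,
).images[0]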
| 187
| 1
|