from __future__ import annotations

import math


class SegmentTree:
    """Segment tree with lazy propagation: range-assignment updates and range-max queries."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every element of the segment [a, b]."""
        # push any pending lazy value down before touching this node
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True

        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum of the segment [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]

        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
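# Added note: `build` runs in O(n); thanks to the lazy flags, each `update`
# and `query` over a range [a, b] visits only O(log n) tree nodes instead of
# rewriting every covered leaf.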
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple

import torch
from audiocraft.models import MusicGen

from transformers import (
    AutoFeatureExtractor,
    AutoTokenizer,
    EncodecModel,
    MusicgenDecoderConfig,
    MusicgenForConditionalGeneration,
    MusicgenProcessor,
    T5EncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

EXPECTED_MISSING_KEYS = ["model.decoder.embed_positions.weights"]


def rename_keys(name):
    """Map an audiocraft (fairseq) parameter name to its transformers equivalent."""
    if "emb" in name:
        name = name.replace("emb", "model.decoder.embed_tokens")
    if "transformer" in name:
        name = name.replace("transformer", "model.decoder")
    if "cross_attention" in name:
        name = name.replace("cross_attention", "encoder_attn")
    if "linear1" in name:
        name = name.replace("linear1", "fc1")
    if "linear2" in name:
        name = name.replace("linear2", "fc2")
    if "norm1" in name:
        name = name.replace("norm1", "self_attn_layer_norm")
    if "norm_cross" in name:
        name = name.replace("norm_cross", "encoder_attn_layer_norm")
    if "norm2" in name:
        name = name.replace("norm2", "final_layer_norm")
    if "out_norm" in name:
        name = name.replace("out_norm", "model.decoder.layer_norm")
    if "linears" in name:
        name = name.replace("linears", "lm_heads")
    if "condition_provider.conditioners.description.output_proj" in name:
        name = name.replace("condition_provider.conditioners.description.output_proj", "enc_to_dec_proj")
    return name


def rename_state_dict(state_dict: OrderedDict, hidden_size: int) -> Tuple[Dict, Dict]:
    """Rename the fairseq state dict keys and split it into the decoder (LM)
    state dict and the encoder-decoder projection state dict."""
    keys = list(state_dict.keys())
    enc_dec_proj_state_dict = {}
    for key in keys:
        val = state_dict.pop(key)
        key = rename_keys(key)
        if "in_proj_weight" in key:
            # split fused qkv proj
            state_dict[key.replace("in_proj_weight", "q_proj.weight")] = val[:hidden_size, :]
            state_dict[key.replace("in_proj_weight", "k_proj.weight")] = val[hidden_size : 2 * hidden_size, :]
            state_dict[key.replace("in_proj_weight", "v_proj.weight")] = val[-hidden_size:, :]
        elif "enc_to_dec_proj" in key:
            enc_dec_proj_state_dict[key[len("enc_to_dec_proj.") :]] = val
        else:
            state_dict[key] = val
    return state_dict, enc_dec_proj_state_dict


def decoder_config_from_checkpoint(checkpoint: str) -> MusicgenDecoderConfig:
    if checkpoint == "small":
        # default config values
        hidden_size = 1024
        num_hidden_layers = 24
        num_attention_heads = 16
    elif checkpoint == "medium":
        hidden_size = 1536
        num_hidden_layers = 48
        num_attention_heads = 24
    elif checkpoint == "large":
        hidden_size = 2048
        num_hidden_layers = 48
        num_attention_heads = 32
    else:
        raise ValueError(f"Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.")
    config = MusicgenDecoderConfig(
        hidden_size=hidden_size,
        ffn_dim=hidden_size * 4,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
    )
    return config


@torch.no_grad()
def convert_musicgen_checkpoint(checkpoint, pytorch_dump_folder=None, repo_id=None, device="cpu"):
    fairseq_model = MusicGen.get_pretrained(checkpoint, device=device)
    decoder_config = decoder_config_from_checkpoint(checkpoint)

    decoder_state_dict = fairseq_model.lm.state_dict()
    decoder_state_dict, enc_dec_proj_state_dict = rename_state_dict(
        decoder_state_dict, hidden_size=decoder_config.hidden_size
    )

    text_encoder = T5EncoderModel.from_pretrained("t5-base")
    audio_encoder = EncodecModel.from_pretrained("facebook/encodec_32khz")
    decoder = MusicgenForCausalLM(decoder_config).eval()

    # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
    missing_keys, unexpected_keys = decoder.load_state_dict(decoder_state_dict, strict=False)

    for key in missing_keys.copy():
        if key.startswith(("text_encoder", "audio_encoder")) or key in EXPECTED_MISSING_KEYS:
            missing_keys.remove(key)

    if len(missing_keys) > 0:
        raise ValueError(f"Missing key(s) in state_dict: {missing_keys}")
    if len(unexpected_keys) > 0:
        raise ValueError(f"Unexpected key(s) in state_dict: {unexpected_keys}")

    # init the composite model
    model = MusicgenForConditionalGeneration(text_encoder=text_encoder, audio_encoder=audio_encoder, decoder=decoder)

    # load the pre-trained enc-dec projection (from the decoder state dict)
    model.enc_to_dec_proj.load_state_dict(enc_dec_proj_state_dict)

    # check we can do a forward pass
    input_ids = torch.arange(0, 8, dtype=torch.long).reshape(2, -1)
    decoder_input_ids = input_ids.reshape(2 * 4, -1)

    with torch.no_grad():
        logits = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

    if logits.shape != (8, 1, 2048):
        raise ValueError("Incorrect shape for logits")

    # now construct the processor
    tokenizer = AutoTokenizer.from_pretrained("t5-base")
    feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/encodec_32khz", padding_side="left")
    processor = MusicgenProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)

    # set the appropriate bos/pad token ids
    model.generation_config.decoder_start_token_id = 2048
    model.generation_config.pad_token_id = 2048

    # set other default generation config params
    model.generation_config.max_length = int(30 * audio_encoder.config.frame_rate)
    model.generation_config.do_sample = True
    model.generation_config.guidance_scale = 3.0

    if pytorch_dump_folder is not None:
        Path(pytorch_dump_folder).mkdir(exist_ok=True)
        logger.info(f"Saving model {checkpoint} to {pytorch_dump_folder}")
        model.save_pretrained(pytorch_dump_folder)
        processor.save_pretrained(pytorch_dump_folder)

    if repo_id:
        logger.info(f"Pushing model {checkpoint} to {repo_id}")
        model.push_to_hub(repo_id)
        processor.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint",
        default="small",
        type=str,
        help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
    )
    parser.add_argument(
        "--pytorch_dump_folder",
        required=True,
        default=None,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    parser.add_argument(
        "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
    )

    args = parser.parse_args()
    convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
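# Example invocation (a sketch; the script filename is illustrative, the flags
# are the ones registered above):
#   python convert_musicgen.py --checkpoint small --pytorch_dump_folder ./musicgen-small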
"""simple docstring"""
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__A : List[Any] = '''%20'''.join(argv[1:]) if len(argv) > 1 else quote(str(input('''Search: ''')))
print('''Googling.....''')
__A : Optional[Any] = F'''https://www.google.com/search?q={query}&num=100'''
__A : Optional[int] = requests.get(
url,
headers={'''User-Agent''': str(UserAgent().random)},
)
try:
__A : Optional[int] = (
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''yuRUbf'''})
.find('''a''')
.get('''href''')
)
except AttributeError:
__A : str = parse_qs(
BeautifulSoup(res.text, '''html.parser''')
.find('''div''', attrs={'''class''': '''kCrYT'''})
.find('''a''')
.get('''href''')
)['''url'''][0]
webbrowser.open(link)
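# Note (added): the CSS class names "yuRUbf" and "kCrYT" are Google's
# obfuscated result-layout markup and rotate without notice; the try/except
# above falls back from the desktop layout to the lightweight one, but both
# selectors can silently break whenever Google reshuffles its HTML.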
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
def __init__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=13 , SCREAMING_SNAKE_CASE_=7 , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=99 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=5 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=0.1 , SCREAMING_SNAKE_CASE_=512 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=0.02 , SCREAMING_SNAKE_CASE_=2 , SCREAMING_SNAKE_CASE_=4 , SCREAMING_SNAKE_CASE_="last" , SCREAMING_SNAKE_CASE_=True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=0 , ):
UpperCamelCase : Union[str, Any] = parent
UpperCamelCase : str = batch_size
UpperCamelCase : int = seq_length
UpperCamelCase : Optional[Any] = is_training
UpperCamelCase : Any = use_input_lengths
UpperCamelCase : Tuple = use_token_type_ids
UpperCamelCase : List[Any] = use_labels
UpperCamelCase : Union[str, Any] = gelu_activation
UpperCamelCase : Dict = sinusoidal_embeddings
UpperCamelCase : Optional[int] = causal
UpperCamelCase : List[Any] = asm
UpperCamelCase : int = n_langs
UpperCamelCase : Optional[Any] = vocab_size
UpperCamelCase : str = n_special
UpperCamelCase : Dict = hidden_size
UpperCamelCase : Union[str, Any] = num_hidden_layers
UpperCamelCase : Optional[Any] = num_attention_heads
UpperCamelCase : Optional[Any] = hidden_dropout_prob
UpperCamelCase : str = attention_probs_dropout_prob
UpperCamelCase : int = max_position_embeddings
UpperCamelCase : Any = type_sequence_label_size
UpperCamelCase : str = initializer_range
UpperCamelCase : str = num_labels
UpperCamelCase : Union[str, Any] = num_choices
UpperCamelCase : List[str] = summary_type
UpperCamelCase : int = use_proj
UpperCamelCase : List[str] = scope
UpperCamelCase : Dict = bos_token_id
def a_ ( self ):
UpperCamelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase : Union[str, Any] = None
if self.use_input_lengths:
UpperCamelCase : str = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCamelCase : Tuple = None
if self.use_token_type_ids:
UpperCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
UpperCamelCase : int = None
UpperCamelCase : Dict = None
UpperCamelCase : str = None
if self.use_labels:
UpperCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCamelCase : Dict = ids_tensor([self.batch_size] , 2 ).float()
UpperCamelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
UpperCamelCase : List[str] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a_ ( self ):
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Optional[int] = XLMModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : int = model(SCREAMING_SNAKE_CASE_ , lengths=SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = model(SCREAMING_SNAKE_CASE_ , langs=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Tuple = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Optional[Any] = XLMWithLMHeadModel(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Dict = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : List[str] = XLMForQuestionAnsweringSimple(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : int = model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : Dict = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : int = XLMForQuestionAnswering(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[Any] = model(
SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , p_mask=SCREAMING_SNAKE_CASE_ , )
UpperCamelCase : Any = model(
SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , cls_index=SCREAMING_SNAKE_CASE_ , is_impossible=SCREAMING_SNAKE_CASE_ , )
((UpperCamelCase) , ) : Union[str, Any] = result_with_labels.to_tuple()
UpperCamelCase : int = model(SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ )
((UpperCamelCase) , ) : Tuple = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : Union[str, Any] = XLMForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : int = model(SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : int = self.num_labels
UpperCamelCase : int = XLMForTokenClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Optional[int] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ):
UpperCamelCase : List[Any] = self.num_choices
UpperCamelCase : Tuple = XLMForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
UpperCamelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCamelCase : Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def a_ ( self ):
UpperCamelCase : int = self.prepare_config_and_inputs()
(
(
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) , (
UpperCamelCase
) ,
) : List[Any] = config_and_inputs
UpperCamelCase : Dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowercase : Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase : List[Any] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase : Optional[Any] = (
{
'feature-extraction': XLMModel,
'fill-mask': XLMWithLMHeadModel,
'question-answering': XLMForQuestionAnsweringSimple,
'text-classification': XLMForSequenceClassification,
'text-generation': XLMWithLMHeadModel,
'token-classification': XLMForTokenClassification,
'zero-shot': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
UpperCamelCase : Union[str, Any] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
UpperCamelCase : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def a_ ( self ):
UpperCamelCase : List[Any] = XLMModelTester(self )
UpperCamelCase : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , emb_dim=37 )
def a_ ( self ):
self.config_tester.run_common_tests()
def a_ ( self ):
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*SCREAMING_SNAKE_CASE_ )
def a_ ( self ):
UpperCamelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 ):
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
[isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for iter_attentions in attentions] , [True] * len(SCREAMING_SNAKE_CASE_ ) )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(SCREAMING_SNAKE_CASE_ ):
# adds PAD dummy token
UpperCamelCase : int = min_length + idx + 1
UpperCamelCase : Tuple = min_length + idx + 1
UpperCamelCase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(SCREAMING_SNAKE_CASE_ ) )
def a_ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=1 ):
self.assertIsInstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
self.assertListEqual(
[isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for iter_hidden_states in hidden_states] , [True] * len(SCREAMING_SNAKE_CASE_ ) , )
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(SCREAMING_SNAKE_CASE_ ):
# adds PAD dummy token
UpperCamelCase : List[str] = min_length + idx + 1
UpperCamelCase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(SCREAMING_SNAKE_CASE_ ) , )
pass
@slow
def a_ ( self ):
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase : str = XLMModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
@slow
def a_ ( self ):
UpperCamelCase : Dict = XLMWithLMHeadModel.from_pretrained("""xlm-mlm-en-2048""" )
model.to(SCREAMING_SNAKE_CASE_ )
UpperCamelCase : List[str] = torch.tensor([[14, 447]] , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) # the president
UpperCamelCase : List[Any] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
UpperCamelCase : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ , do_sample=SCREAMING_SNAKE_CASE_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , SCREAMING_SNAKE_CASE_ )
"""simple docstring"""
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def a__ ( lowerCAmelCase ) -> Any:
# A local function to see if a dot lands in the circle.
def is_in_circle(lowerCAmelCase , lowerCAmelCase ) -> bool:
UpperCAmelCase__ : Optional[int] = sqrt((x**2) + (y**2) )
# Our circle has a radius of 1, so a distance
# greater than 1 would land outside the circle.
return distance_from_centre <= 1
# The proportion of guesses that landed in the circle
UpperCAmelCase__ : Tuple = mean(
int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) )
for _ in range(lowerCAmelCase ) )
# The ratio of the area for circle to square is pi/4.
UpperCAmelCase__ : Tuple = proportion * 4
print(F"""The estimated value of pi is {pi_estimate}""" )
print(F"""The numpy value of pi is {pi}""" )
print(F"""The total error is {abs(pi - pi_estimate )}""" )
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0.0 , lowerCAmelCase = 1.0 , ) -> float:
return mean(
function_to_integrate(uniform(lowerCAmelCase , lowerCAmelCase ) ) for _ in range(lowerCAmelCase ) ) * (max_value - min_value)
def a__ ( lowerCAmelCase , lowerCAmelCase = 0.0 , lowerCAmelCase = 1.0 ) -> None:
def identity_function(lowerCAmelCase ) -> float:
return x
UpperCAmelCase__ : Union[str, Any] = area_under_curve_estimator(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase__ : List[Any] = (max_value * max_value - min_value * min_value) / 2
print("""******************""" )
print(F"""Estimating area under y=x where x varies from {min_value} to {max_value}""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {expected_value}""" )
print(F"""Total error is {abs(estimated_value - expected_value )}""" )
print("""******************""" )
def a__ ( lowerCAmelCase ) -> None:
def function_to_integrate(lowerCAmelCase ) -> float:
return sqrt(4.0 - x * x )
UpperCAmelCase__ : Any = area_under_curve_estimator(
lowerCAmelCase , lowerCAmelCase , 0.0 , 2.0 )
print("""******************""" )
print("""Estimating pi using area_under_curve_estimator""" )
print(F"""Estimated value is {estimated_value}""" )
print(F"""Expected value is {pi}""" )
print(F"""Total error is {abs(estimated_value - pi )}""" )
print("""******************""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
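# Added note: Monte Carlo error decays like O(1/sqrt(n)), so quadrupling the
# sample count should roughly halve the expected error, e.g.:
#   for n in (1_000, 4_000, 16_000):
#       pi_estimator(n)  # printed error shrinks roughly 2x per step, on average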
"""simple docstring"""
import numpy as np
def a__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1E-12 , lowerCAmelCase = 1_00 , ) -> tuple[float, np.ndarray]:
assert np.shape(lowerCAmelCase )[0] == np.shape(lowerCAmelCase )[1]
# Ensure proper dimensionality.
assert np.shape(lowerCAmelCase )[0] == np.shape(lowerCAmelCase )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowerCAmelCase ) == np.iscomplexobj(lowerCAmelCase )
UpperCAmelCase__ : Optional[Any] = np.iscomplexobj(lowerCAmelCase )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowerCAmelCase , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Optional[int] = 1E12
while not convergence:
# Multiple matrix by the vector.
UpperCAmelCase__ : int = np.dot(lowerCAmelCase , lowerCAmelCase )
# Normalize the resulting output vector.
UpperCAmelCase__ : Optional[Any] = w / np.linalg.norm(lowerCAmelCase )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
UpperCAmelCase__ : List[Any] = vector.conj().T if is_complex else vector.T
UpperCAmelCase__ : Optional[Any] = np.dot(lowerCAmelCase , np.dot(lowerCAmelCase , lowerCAmelCase ) )
# Check convergence.
UpperCAmelCase__ : Union[str, Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : List[Any] = lambda_
if is_complex:
UpperCAmelCase__ : Any = np.real(lambda_ )
return lambda_, vector
def a__ ( ) -> None:
UpperCAmelCase__ : Tuple = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
UpperCAmelCase__ : int = np.array([41, 4, 20] )
UpperCAmelCase__ : str = real_input_matrix.astype(np.complexaaa )
UpperCAmelCase__ : Any = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
UpperCAmelCase__ : Dict = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
UpperCAmelCase__ : List[str] = real_input_matrix
UpperCAmelCase__ : Any = real_vector
elif problem_type == "complex":
UpperCAmelCase__ : List[Any] = complex_input_matrix
UpperCAmelCase__ : int = complex_vector
# Our implementation.
UpperCAmelCase__ , UpperCAmelCase__ : int = power_iteration(lowerCAmelCase , lowerCAmelCase )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
UpperCAmelCase__ , UpperCAmelCase__ : List[str] = np.linalg.eigh(lowerCAmelCase )
# Last eigenvalue is the maximum one.
UpperCAmelCase__ : str = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
UpperCAmelCase__ : List[Any] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(lowerCAmelCase ) - np.abs(lowerCAmelCase ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
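# Added note: power iteration converges linearly at rate |lambda_2 / lambda_1|
# (ratio of the second-largest to largest eigenvalue magnitudes), so matrices
# with well-separated spectra, like the 3x3 examples above, should hit the
# 1e-12 tolerance well before max_iterations.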
from sklearn.metrics import mean_squared_error

import datasets


_CITATION = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"

_DESCRIPTION = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"

_KWARGS_DESCRIPTION = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(self._get_feature_types()),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
            ],
        )

    def _get_feature_types(self):
        if self.config_name == "multilist":
            return {
                "predictions": datasets.Sequence(datasets.Value("float")),
                "references": datasets.Sequence(datasets.Value("float")),
            }
        else:
            return {
                "predictions": datasets.Value("float"),
                "references": datasets.Value("float"),
            }

    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
from __future__ import annotations

import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module

import numpy as np

from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import TFViTMAEForPreTraining, TFViTMAEModel


if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)

    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)

    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n    to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run

    _serve_dependencies_installed = True
except (ImportError, AttributeError):
    BaseModel = object

    def Body(*x, **y):
        pass

    _serve_dependencies_installed = False


logger = logging.get_logger("transformers-cli/serving")


def serve_command_factory(args: Namespace) -> "ServeCommand":
    """Factory used by the `serve` subcommand: builds the pipeline and wraps it in a ServeCommand."""
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    return ServeCommand(nlp, args.host, args.port, args.workers)


class ServeModelInfoResult(BaseModel):
    """Expose the model's configuration."""

    infos: dict


class ServeTokenizeResult(BaseModel):
    """Tokenize result model."""

    tokens: List[str]
    tokens_ids: Optional[List[int]]


class ServeDeTokenizeResult(BaseModel):
    """DeTokenize result model."""

    text: str


class ServeForwardResult(BaseModel):
    """Forward result model."""

    output: Any


class ServeCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        serve_parser = parser.add_parser(
            "serve", help="CLI tool to run inference requests through REST and GraphQL endpoints."
        )
        serve_parser.add_argument(
            "--task", type=str, choices=get_supported_tasks(), help="The task to run the pipeline on"
        )
        serve_parser.add_argument("--host", type=str, default="localhost", help="Interface the server will listen on.")
        serve_parser.add_argument("--port", type=int, default=8888, help="Port the serving will listen to.")
        serve_parser.add_argument("--workers", type=int, default=1, help="Number of http workers")
        serve_parser.add_argument("--model", type=str, help="Model's name or path to stored model.")
        serve_parser.add_argument("--config", type=str, help="Model's config name or path to stored model.")
        serve_parser.add_argument("--tokenizer", type=str, help="Tokenizer name to use.")
        serve_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        serve_parser.set_defaults(func=serve_command_factory)

    def __init__(self, pipeline: Pipeline, host: str, port: int, workers: int):
        self._pipeline = pipeline
        self.host = host
        self.port = port
        self.workers = workers

        if not _serve_dependencies_installed:
            raise RuntimeError(
                "Using serve command requires FastAPI and uvicorn. "
                'Please install transformers with [serving]: pip install "transformers[serving]". '
                "Or install FastAPI and uvicorn separately."
            )
        else:
            logger.info(f"Serving model over {host}:{port}")
            self._app = FastAPI(
                routes=[
                    APIRoute(
                        "/",
                        self.model_info,
                        response_model=ServeModelInfoResult,
                        response_class=JSONResponse,
                        methods=["GET"],
                    ),
                    APIRoute(
                        "/tokenize",
                        self.tokenize,
                        response_model=ServeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/detokenize",
                        self.detokenize,
                        response_model=ServeDeTokenizeResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                    APIRoute(
                        "/forward",
                        self.forward,
                        response_model=ServeForwardResult,
                        response_class=JSONResponse,
                        methods=["POST"],
                    ),
                ],
                timeout=600,
            )

    def run(self):
        run(self._app, host=self.host, port=self.port, workers=self.workers)

    def model_info(self):
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config))

    def tokenize(self, text_input: str = Body(None, embed=True), return_ids: bool = Body(False, embed=True)):
        """Tokenize the provided input, optionally mapping tokens to their ids."""
        try:
            tokens_txt = self._pipeline.tokenizer.tokenize(text_input)

            if return_ids:
                tokens_ids = self._pipeline.tokenizer.convert_tokens_to_ids(tokens_txt)
                return ServeTokenizeResult(tokens=tokens_txt, tokens_ids=tokens_ids)
            else:
                return ServeTokenizeResult(tokens=tokens_txt)

        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    def detokenize(
        self,
        tokens_ids: List[int] = Body(None, embed=True),
        skip_special_tokens: bool = Body(False, embed=True),
        cleanup_tokenization_spaces: bool = Body(True, embed=True),
    ):
        """Detokenize the provided token ids back into readable text."""
        try:
            decoded_str = self._pipeline.tokenizer.decode(tokens_ids, skip_special_tokens, cleanup_tokenization_spaces)
            return ServeDeTokenizeResult(model="", text=decoded_str)
        except Exception as e:
            raise HTTPException(status_code=500, detail={"model": "", "error": str(e)})

    async def forward(self, inputs=Body(None, embed=True)):
        """Run the pipeline on the provided inputs."""
        # Check we don't have empty string
        if len(inputs) == 0:
            return ServeForwardResult(output=[], attention=[])

        try:
            # Forward through the model
            output = self._pipeline(inputs)
            return ServeForwardResult(output=output)
        except Exception as e:
            raise HTTPException(500, {"error": str(e)})
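

# --- Usage sketch (not part of the original command implementation) ---
# A minimal illustration of how the routes registered above could be exercised
# once the server is running, e.g. after
#   transformers-cli serve --task feature-extraction --port 8888
# The host/port and JSON payload shapes below are assumptions read off the
# Body(..., embed=True) parameters above.
def _example_client_requests():
    import requests  # assumed available in the client environment

    base = "http://localhost:8888"
    tokens = requests.post(base + "/tokenize", json={"text_input": "Hello world", "return_ids": True}).json()
    output = requests.post(base + "/forward", json={"inputs": "Hello world"}).json()
    return tokens, output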
"""simple docstring"""
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    """Differentiates `func` at `position` to the given `order` using dual numbers."""
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
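

# A short, hedged demonstration of the forward-mode differentiation above:
# for f(y) = y**3, the first derivative at y = 2 is 3 * 2**2 = 12.
def _example_dual_numbers():
    print(differentiate(lambda y: y**3, 2, 1))  # 12
    print(Dual(2, 1) * Dual(3, 1))  # dual-number product, e.g. 6+5E1+1E2+0E3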
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.array) -> np.array:
    """Implements tanh(x) = (2 / (1 + e^(-2x))) - 1 element-wise."""
    return (2 / (1 + np.exp(-2 * vector))) - 1
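

# Hedged usage sketch: the rational form above should agree with np.tanh
# element-wise on any input vector.
def _example_tangent_hyperbolic():
    v = np.array([-1.0, 0.0, 1.0])
    assert np.allclose(tangent_hyperbolic(v), np.tanh(v))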
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_mvp": ["MVP_PRETRAINED_CONFIG_ARCHIVE_MAP", "MvpConfig", "MvpOnnxConfig"],
"tokenization_mvp": ["MvpTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
"MVP_PRETRAINED_MODEL_ARCHIVE_LIST",
"MvpForCausalLM",
"MvpForConditionalGeneration",
"MvpForQuestionAnswering",
"MvpForSequenceClassification",
"MvpModel",
"MvpPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
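

# A minimal, self-contained sketch (an assumption, not the transformers
# implementation) of the lazy-module idea used above: attribute access, not
# import time, is what triggers loading the real submodule.
class _TinyLazyModule:
    def __init__(self, import_map):
        self._import_map = import_map  # attribute name -> module path

    def __getattr__(self, name):
        import importlib

        module = importlib.import_module(self._import_map[name])
        return getattr(module, name)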
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()
if __name__ == "__main__":
main()
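
# Hedged usage note: once diffusers is installed, its setup typically exposes this
# entry point under the console-script name suggested by the usage string above, so
#   diffusers-cli env
# runs the EnvironmentCommand registered above and prints environment information.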
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step functions."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.timesteps = None

        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device=None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps

        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(
        self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None
    ):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)

        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device
        )  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()

        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)

        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5

        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output

        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype
        )
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g

        if not return_dict:
            return (prev_sample, prev_sample_mean)

        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)

        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])

        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
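

# A hedged, minimal usage sketch of the scheduler above: construct it, set the
# discretisation, and inspect the geometric sigma schedule (values illustrative).
def _example_sde_ve_schedule():
    scheduler = ScoreSdeVeScheduler(num_train_timesteps=10)
    scheduler.set_sigmas(10)
    print(scheduler.timesteps)        # 10 points from 1.0 down to sampling_eps
    print(scheduler.discrete_sigmas)  # log-spaced sigmas between sigma_min and sigma_max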
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json",
    # See all LeViT models at https://huggingface.co/models?filter=levit
}


class LevitConfig(PretrainedConfig):
    model_type = "levit"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        kernel_size=3,
        stride=2,
        padding=1,
        patch_size=16,
        hidden_sizes=[128, 256, 384],
        num_attention_heads=[4, 8, 12],
        depths=[4, 4, 4],
        key_dim=[16, 16, 16],
        drop_path_rate=0,
        mlp_ratio=[2, 2, 2],
        attention_ratio=[2, 2, 2],
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.hidden_sizes = hidden_sizes
        self.num_attention_heads = num_attention_heads
        self.depths = depths
        self.key_dim = key_dim
        self.drop_path_rate = drop_path_rate
        self.patch_size = patch_size
        self.attention_ratio = attention_ratio
        self.mlp_ratio = mlp_ratio
        self.initializer_range = initializer_range
        self.down_ops = [
            ["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
            ["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
        ]


class LevitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
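

# Hedged usage sketch: instantiating the configuration with its defaults and
# reading back a couple of the fields set above.
def _example_levit_config():
    config = LevitConfig()
    print(config.image_size, config.hidden_sizes)  # 224 [128, 256, 384]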
import itertools
import random
import unittest
import numpy as np
from transformers import BatchFeature, SpeechTaFeatureExtractor
from transformers.testing_utils import require_torch
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested list."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
@require_torch
class SpeechTaFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        do_normalize=True,
        num_mel_bins=80,
        hop_length=16,
        win_length=64,
        win_function="hann_window",
        fmin=80,
        fmax=7600,
        mel_floor=1e-10,
        return_attention_mask=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.do_normalize = do_normalize
        self.num_mel_bins = num_mel_bins
        self.hop_length = hop_length
        self.win_length = win_length
        self.win_function = win_function
        self.fmin = fmin
        self.fmax = fmax
        self.mel_floor = mel_floor
        self.return_attention_mask = return_attention_mask

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "do_normalize": self.do_normalize,
            "num_mel_bins": self.num_mel_bins,
            "hop_length": self.hop_length,
            "win_length": self.win_length,
            "win_function": self.win_function,
            "fmin": self.fmin,
            "fmax": self.fmax,
            "mel_floor": self.mel_floor,
            "return_attention_mask": self.return_attention_mask,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs

    def prepare_inputs_for_target(self, equal_length=False, numpify=False):
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.num_mel_bins))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs
@require_torch
class SpeechTaFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = SpeechTaFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = SpeechTaFeatureExtractionTester(self)
    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =[np.asarray(__a ) for speech_input in speech_inputs]
# Test not batched input
a__ =feat_extract(speech_inputs[0], return_tensors='''np''' ).input_values
a__ =feat_extract(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
# Test batched
a__ =feat_extract(__a, return_tensors='''np''' ).input_values
a__ =feat_extract(__a, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__a, __a ):
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
    def test_zero_mean_unit_variance_normalization_np(self):
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =['longest', 'max_length', 'do_not_pad']
a__ =[None, 1600, None]
for max_length, padding in zip(__a, __a ):
a__ =feat_extract(__a, padding=__a, max_length=__a, return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self.assertTrue(input_values[0][1000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization(self):
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =range(800, 1400, 200 )
a__ =[floats_list((1, x) )[0] for x in lengths]
a__ =['longest', 'max_length', 'do_not_pad']
a__ =[None, 1600, None]
for max_length, padding in zip(__a, __a ):
a__ =feat_extract(__a, max_length=__a, padding=__a )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1000] )
self._check_zero_mean_unit_variance(input_values[2][:1200] )
    def test_zero_mean_unit_variance_normalization_trunc_np_max_length(self):
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =feat_extract(
__a, truncation=__a, max_length=1000, padding='''max_length''', return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
    def test_zero_mean_unit_variance_normalization_trunc_np_longest(self):
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =feat_extract(
__a, truncation=__a, max_length=1000, padding='''longest''', return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1000) )
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =feat_extract(
__a, truncation=__a, max_length=2000, padding='''longest''', return_tensors='''np''' )
a__ =processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1200) )
    def test_double_precision_pad(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)
    def test_call_target(self):
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
a__ =[floats_list((1, x) )[0] for x in range(800, 1400, 200 )]
a__ =[np.asarray(__a ) for speech_input in speech_inputs]
# Test feature size
a__ =feature_extractor(audio_target=__a, padding=__a, return_tensors='''np''' ).input_values
self.assertTrue(input_values.ndim == 3 )
self.assertTrue(input_values.shape[-1] == feature_extractor.num_mel_bins )
# Test not batched input
a__ =feature_extractor(speech_inputs[0], return_tensors='''np''' ).input_values
a__ =feature_extractor(np_speech_inputs[0], return_tensors='''np''' ).input_values
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
# Test batched
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__a, __a ):
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
a__ =[floats_list((1, x) )[0] for x in (800, 800, 800)]
a__ =np.asarray(__a )
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
a__ =feature_extractor(__a, return_tensors='''np''' ).input_values
for enc_seq_a, enc_seq_a in zip(__a, __a ):
self.assertTrue(np.allclose(__a, __a, atol=1E-3 ) )
    def test_batch_feature_target(self):
"""simple docstring"""
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =self.feature_extraction_class(**self.feat_extract_dict )
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__a ) == len(__a ) for x, y in zip(__a, processed_features[input_name] ) ) )
a__ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
a__ =BatchFeature({input_name: speech_inputs}, tensor_type='''np''' )
a__ =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_batch_feature_target_pt(self):
"""simple docstring"""
a__ =self.feat_extract_tester.prepare_inputs_for_target(equal_length=__a )
a__ =self.feature_extraction_class(**self.feat_extract_dict )
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs}, tensor_type='''pt''' )
a__ =processed_features[input_name]
if len(batch_features_input.shape ) < 3:
a__ =batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.num_mel_bins) )
@require_torch
    def test_padding_accepts_tensors_target(self):
"""simple docstring"""
a__ =self.feature_extraction_class(**self.feat_extract_dict )
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
a__ =feat_extract.num_mel_bins # hack!
a__ =feat_extract.pad(__a, padding='''longest''', return_tensors='''np''' )[input_name]
a__ =feat_extract.pad(__a, padding='''longest''', return_tensors='''pt''' )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 )
    def test_attention_mask_target(self):
"""simple docstring"""
a__ =self.feat_extract_dict
a__ =True
a__ =self.feature_extraction_class(**__a )
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =[len(__a ) for x in speech_inputs]
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
a__ =feat_extract.num_mel_bins # hack!
a__ =feat_extract.pad(__a, padding='''longest''', return_tensors='''np''' )
self.assertIn('''attention_mask''', __a )
self.assertListEqual(list(processed.attention_mask.shape ), list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist(), __a )
    def test_attention_mask_with_truncation_target(self):
"""simple docstring"""
a__ =self.feat_extract_dict
a__ =True
a__ =self.feature_extraction_class(**__a )
a__ =self.feat_extract_tester.prepare_inputs_for_target()
a__ =[len(__a ) for x in speech_inputs]
a__ =feat_extract.model_input_names[0]
a__ =BatchFeature({input_name: speech_inputs} )
a__ =min(__a )
a__ =feat_extract.num_mel_bins # hack!
a__ =feat_extract.pad(
__a, padding='''max_length''', max_length=__a, truncation=__a, return_tensors='''np''' )
self.assertIn('''attention_mask''', __a )
self.assertListEqual(
list(processed_pad.attention_mask.shape ), [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist(), [max_length for x in speech_inputs] )
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
"""simple docstring"""
a__ =torch.tensor(
[2.3804E-03, 2.0752E-03, 1.9836E-03, 2.1057E-03, 1.6174E-03,
3.0518E-04, 9.1553E-05, 3.3569E-04, 9.7656E-04, 1.8311E-03,
2.0142E-03, 2.1057E-03, 1.7395E-03, 4.5776E-04, -3.9673E-04,
4.5776E-04, 1.0071E-03, 9.1553E-05, 4.8828E-04, 1.1597E-03,
7.3242E-04, 9.4604E-04, 1.8005E-03, 1.8311E-03, 8.8501E-04,
4.2725E-04, 4.8828E-04, 7.3242E-04, 1.0986E-03, 2.1057E-03] )
# fmt: on
a__ =self._load_datasamples(1 )
a__ =SpeechTaFeatureExtractor()
a__ =feature_extractor(__a, return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape, (1, 93680) )
self.assertTrue(torch.allclose(input_values[0, :30], __a, atol=1E-6 ) )
    def test_integration_target(self):
"""simple docstring"""
a__ =torch.tensor(
[-2.6870, -3.0104, -3.1356, -3.5352, -3.0044, -3.0353, -3.4719, -3.6777,
-3.1520, -2.9435, -2.6553, -2.8795, -2.9944, -2.5921, -3.0279, -3.0386,
-3.0864, -3.1291, -3.2353, -2.7444, -2.6831, -2.7287, -3.1761, -3.1571,
-3.2726, -3.0582, -3.1007, -3.4533, -3.4695, -3.0998] )
# fmt: on
a__ =self._load_datasamples(1 )
a__ =SpeechTaFeatureExtractor()
a__ =feature_extractor(audio_target=__a, return_tensors='''pt''' ).input_values
self.assertEquals(input_values.shape, (1, 366, 80) )
self.assertTrue(torch.allclose(input_values[0, 0, :30], __a, atol=1E-4 ) )
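
    # Hedged note: these cases are normally collected with pytest, e.g.
    #   python -m pytest -k "SpeechTaFeatureExtraction"
    # The two integration tests above additionally download a small LibriSpeech
    # sample through `datasets`, so they need network access on first run.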
'''simple docstring'''
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows_if_not_windows_compatible(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper


def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished
@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows_if_not_windows_compatible
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)
    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("bleurt")
def patch_bleurt(module_name):
    import tensorflow.compat.v1 as tf
    from bleurt.score import Predictor

    tf.flags.DEFINE_string("sv", "", "")  # handle pytest cli flags

    class MockedPredictor(Predictor):
        def predict(self, input_dict):
            assert len(input_dict["input_ids"]) == 2
            return np.array([1.03, 1.04])

    # mock predict_fn which is supposed to do a forward pass with a bleurt model
    with patch("bleurt.score._create_predictor") as mock_create_predictor:
        mock_create_predictor.return_value = MockedPredictor()
        yield
@LocalMetricTest.register_intensive_calls_patcher("bertscore")
def patch_bertscore(module_name):
    import torch

    def bert_cos_score_idf(model, refs, *args, **kwargs):
        return torch.tensor([[1.0, 1.0, 1.0]] * len(refs))

    # mock get_model which is supposed to do download a bert model
    # mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
    with patch("bert_score.scorer.get_model"), patch(
        "bert_score.scorer.bert_cos_score_idf"
    ) as mock_bert_cos_score_idf:
        mock_bert_cos_score_idf.side_effect = bert_cos_score_idf
        yield
@LocalMetricTest.register_intensive_calls_patcher("comet")
def patch_comet(module_name):
    def load_from_checkpoint(model_path):
        class Model:
            def predict(self, data, *args, **kwargs):
                assert len(data) == 2
                scores = [0.19, 0.92]
                return scores, sum(scores) / len(scores)

        return Model()

    # mock download_model and load_from_checkpoint, which would otherwise download a model
    with patch("comet.download_model") as mock_download_model:
        mock_download_model.return_value = None
        with patch("comet.load_from_checkpoint") as mock_load_from_checkpoint:
            mock_load_from_checkpoint.side_effect = load_from_checkpoint
            yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint
    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--swiftformer_name",
default="swiftformer_xs",
choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
type=str,
help="Name of the SwiftFormer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="./converted_outputs/",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")
    args = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
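
# Hedged usage sketch (script file name and checkpoint path are illustrative, not verified):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs.pth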
def solution():
    """Returns the product d1 * d10 * d100 * ... * d1000000 of digits of the Champernowne constant."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
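

# Hedged check of the construction above: the constant's digit string starts
# "123456789101112...", so a short prefix can be verified by hand.
def _example_champernowne_prefix():
    prefix = "".join(str(i) for i in range(1, 20))
    print(prefix[:15])  # 123456789101112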
if __name__ == "__main__":
print(solution())
def prime_sieve_eratosthenes(num):
    """Sieve of Eratosthenes: returns all primes up to and including num."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
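

# Hedged example of the sieve above: the primes up to 10.
def _example_sieve():
    assert prime_sieve_eratosthenes(10) == [2, 3, 5, 7]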
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Finds the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Finds the rightmost mismatch for the alignment at `current_pos`, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches the pattern in the text and returns the index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
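

# Hedged extra check: the heuristic above records every alignment whose scan
# finds no mismatch, so for a classic example it reports all occurrences.
def _example_boyer_moore():
    print(BoyerMooreSearch("AABAACAADAABAABA", "AABA").bad_character_heuristic())  # [0, 9, 12]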
text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()
if len(positions) == 0:
print('No match found')
else:
print('Pattern found in following positions: ')
print(positions)
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
def _lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
self.assertIsNotNone(__A )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Optional[Any] = True
snake_case__ : List[Any] = getattr(self.model_tester , "decoder_seq_length" , self.model_tester.seq_length )
snake_case__ : int = getattr(self.model_tester , "encoder_seq_length" , self.model_tester.seq_length )
snake_case__ : Any = getattr(self.model_tester , "key_length" , __A )
snake_case__ : List[Any] = getattr(self.model_tester , "key_length" , __A )
def check_decoder_attentions_output(__A : Optional[int] ):
snake_case__ : Optional[Any] = len(__A )
self.assertEqual(out_len % 2 , 0 )
snake_case__ : Optional[int] = outputs.decoder_attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__A : Any ):
snake_case__ : List[Any] = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = True
snake_case__ : Any = False
snake_case__ : Dict = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = len(__A )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
if self.is_encoder_decoder:
snake_case__ : str = model_class(__A )
snake_case__ : List[Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_decoder_attentions_output(__A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
snake_case__ : Optional[int] = True
snake_case__ : Optional[Any] = model_class(__A )
snake_case__ : Union[str, Any] = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
# Check attention is always last and order is fine
snake_case__ : Optional[int] = True
snake_case__ : List[Any] = True
snake_case__ : Any = model_class(__A )
snake_case__ : str = model(self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__A ) )
self.assertEqual(model.config.output_hidden_states , __A )
check_encoder_attentions_output(__A )
@require_tf
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : int ):
snake_case__ : int = TFConvBertModel.from_pretrained("YituTech/conv-bert-base" )
snake_case__ : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
snake_case__ : str = model(__A )[0]
snake_case__ : int = [1, 6, 7_6_8]
self.assertEqual(output.shape , __A )
snake_case__ : List[Any] = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __A , atol=1e-4 )
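# Illustrative sketch (added; not part of the original test file): the integration check
# above compares only the top-left 3x3 slice of the first batch element against
# hardcoded values, which keeps the fixture tiny while still catching numerical drift.
# The same pattern in plain NumPy:
import numpy as np
def assert_slice_close(output, expected_slice, atol=1e-4):
    # Slice out the corner of the first batch element before comparing, as above.
    actual = output[0, :3, :3]
    assert np.allclose(actual, expected_slice, atol=atol)
fake_output = np.zeros((1, 6, 768))
fake_output[0, :3, :3] = [[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]
assert_slice_close(fake_output, np.array([[0.1, 0.2, 0.3], [0.4, 0.5, 0.6], [0.7, 0.8, 0.9]]))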
| 286
|
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = ["image_processor", "tokenizer"]
a_ = "ViltImageProcessor"
a_ = ("BertTokenizer", "BertTokenizerFast")
def __init__( self : Optional[int] , __A : Optional[int]=None , __A : Optional[Any]=None , **__A : int ):
snake_case__ : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
snake_case__ : Tuple = kwargs.pop("feature_extractor" )
snake_case__ : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A , __A )
snake_case__ : Tuple = self.image_processor
def __call__( self : List[Any] , __A : int , __A : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , __A : bool = True , __A : Union[bool, str, PaddingStrategy] = False , __A : Union[bool, str, TruncationStrategy] = None , __A : Optional[int] = None , __A : int = 0 , __A : Optional[int] = None , __A : Optional[bool] = None , __A : Optional[bool] = None , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = False , __A : bool = True , __A : Optional[Union[str, TensorType]] = None , **__A : List[Any] , ):
snake_case__ : Optional[int] = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel_values + pixel_mask
snake_case__ : Optional[Any] = self.image_processor(__A , return_tensors=__A )
encoding.update(__A )
return encoding
def _lowercase ( self : Optional[Any] , *__A : List[str] , **__A : Optional[int] ):
return self.tokenizer.batch_decode(*__A , **__A )
def _lowercase ( self : Dict , *__A : str , **__A : str ):
return self.tokenizer.decode(*__A , **__A )
@property
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = self.tokenizer.model_input_names
snake_case__ : Any = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowercase ( self : List[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def _lowercase ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
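# Hedged usage sketch (commented out because it downloads a checkpoint; the checkpoint
# name below is an assumption, not taken from this file): the processor above simply
# tokenizes the text, runs the image processor on the image, then merges both results
# into one BatchEncoding.
# from PIL import Image
# from transformers import ViltProcessor
# processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
# encoding = processor(Image.open("cats.jpg"), "How many cats?", return_tensors="pt")
# # keys: input_ids / attention_mask from the tokenizer, plus pixel_values / pixel_mask
# # from the image processor.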
| 286
| 1
|
"""simple docstring"""
import numpy as np
def lowercase__ ( snake_case_ :np.array ):
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod()
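# Sanity sketch (added for illustration): the function above is an algebraic rewrite of
# the hyperbolic tangent, tanh(x) = 2 / (1 + exp(-2x)) - 1, so it should agree with
# numpy's builtin to floating-point precision.
import numpy as np
_x = np.linspace(-3.0, 3.0, 7)
assert np.allclose((2 / (1 + np.exp(-2 * _x))) - 1, np.tanh(_x))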
| 332
|
"""simple docstring"""
import argparse
import copy
def lowercase__ ( snake_case_ :Tuple ):
__UpperCAmelCase = {}
with open(snake_case_ ) as f:
for line in f:
if line.split()[0] not in dict_of_neighbours:
__UpperCAmelCase = []
_list.append([line.split()[1], line.split()[2]] )
__UpperCAmelCase = _list
else:
dict_of_neighbours[line.split()[0]].append(
[line.split()[1], line.split()[2]] )
if line.split()[1] not in dict_of_neighbours:
__UpperCAmelCase = []
_list.append([line.split()[0], line.split()[2]] )
__UpperCAmelCase = _list
else:
dict_of_neighbours[line.split()[1]].append(
[line.split()[0], line.split()[2]] )
return dict_of_neighbours
def lowercase__ ( snake_case_ :Dict , snake_case_ :Optional[Any] ):
with open(snake_case_ ) as f:
__UpperCAmelCase = f.read(1 )
__UpperCAmelCase = start_node
__UpperCAmelCase = []
__UpperCAmelCase = start_node
__UpperCAmelCase = 0
while visiting not in first_solution:
__UpperCAmelCase = 10_000
for k in dict_of_neighbours[visiting]:
if int(k[1] ) < int(snake_case_ ) and k[0] not in first_solution:
__UpperCAmelCase = k[1]
__UpperCAmelCase = k[0]
first_solution.append(snake_case_ )
__UpperCAmelCase = distance_of_first_solution + int(snake_case_ )
__UpperCAmelCase = best_node
first_solution.append(snake_case_ )
__UpperCAmelCase = 0
for k in dict_of_neighbours[first_solution[-2]]:
if k[0] == start_node:
break
position += 1
__UpperCAmelCase = (
distance_of_first_solution
+ int(dict_of_neighbours[first_solution[-2]][position][1] )
- 10_000
)
return first_solution, distance_of_first_solution
def lowercase__ ( snake_case_ :int , snake_case_ :Tuple ):
__UpperCAmelCase = []
for n in solution[1:-1]:
__UpperCAmelCase = solution.index(snake_case_ )
for kn in solution[1:-1]:
__UpperCAmelCase = solution.index(snake_case_ )
if n == kn:
continue
__UpperCAmelCase = copy.deepcopy(snake_case_ )
__UpperCAmelCase = kn
__UpperCAmelCase = n
__UpperCAmelCase = 0
for k in _tmp[:-1]:
__UpperCAmelCase = _tmp[_tmp.index(snake_case_ ) + 1]
for i in dict_of_neighbours[k]:
if i[0] == next_node:
__UpperCAmelCase = distance + int(i[1] )
_tmp.append(snake_case_ )
if _tmp not in neighborhood_of_solution:
neighborhood_of_solution.append(_tmp )
__UpperCAmelCase = len(neighborhood_of_solution[0] ) - 1
neighborhood_of_solution.sort(key=lambda x : x[index_of_last_item_in_the_list] )
return neighborhood_of_solution
def lowercase__ ( snake_case_ :str , snake_case_ :Union[str, Any] , snake_case_ :Optional[int] , snake_case_ :Dict , snake_case_ :int ):
__UpperCAmelCase = 1
__UpperCAmelCase = first_solution
__UpperCAmelCase = []
__UpperCAmelCase = distance_of_first_solution
__UpperCAmelCase = solution
while count <= iters:
__UpperCAmelCase = find_neighborhood(snake_case_ , snake_case_ )
__UpperCAmelCase = 0
__UpperCAmelCase = neighborhood[index_of_best_solution]
__UpperCAmelCase = len(snake_case_ ) - 1
__UpperCAmelCase = False
while not found:
__UpperCAmelCase = 0
while i < len(snake_case_ ):
if best_solution[i] != solution[i]:
__UpperCAmelCase = best_solution[i]
__UpperCAmelCase = solution[i]
break
__UpperCAmelCase = i + 1
if [first_exchange_node, second_exchange_node] not in tabu_list and [
second_exchange_node,
first_exchange_node,
] not in tabu_list:
tabu_list.append([first_exchange_node, second_exchange_node] )
__UpperCAmelCase = True
__UpperCAmelCase = best_solution[:-1]
__UpperCAmelCase = neighborhood[index_of_best_solution][best_cost_index]
if cost < best_cost:
__UpperCAmelCase = cost
__UpperCAmelCase = solution
else:
__UpperCAmelCase = index_of_best_solution + 1
__UpperCAmelCase = neighborhood[index_of_best_solution]
if len(snake_case_ ) >= size:
tabu_list.pop(0 )
__UpperCAmelCase = count + 1
return best_solution_ever, best_cost
def lowercase__ ( snake_case_ :str=None ):
__UpperCAmelCase = generate_neighbours(args.File )
__UpperCAmelCase , __UpperCAmelCase = generate_first_solution(
args.File , snake_case_ )
__UpperCAmelCase , __UpperCAmelCase = tabu_search(
snake_case_ , snake_case_ , snake_case_ , args.Iterations , args.Size , )
print(F'''Best solution: {best_sol}, with total distance: {best_cost}.''' )
if __name__ == "__main__":
_lowercase : List[str] = argparse.ArgumentParser(description='Tabu Search')
parser.add_argument(
'-f',
'--File',
type=str,
help='Path to the file containing the data',
required=True,
)
parser.add_argument(
'-i',
'--Iterations',
type=int,
help='How many iterations the algorithm should perform',
required=True,
)
parser.add_argument(
'-s', '--Size', type=int, help='Size of the tabu list', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
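# Illustrative note (added; the exact file layout is inferred from generate_neighbours
# above, so treat it as an assumption): the input file is expected to hold one edge per
# line as "node_a node_b distance". A minimal in-memory check of that adjacency parsing:
sample_lines = ["a b 20", "a c 18", "b c 10"]
neighbours = {}
for line in sample_lines:
    u, v, w = line.split()
    # store both directions, mirroring the dict built by generate_neighbours
    neighbours.setdefault(u, []).append([v, w])
    neighbours.setdefault(v, []).append([u, w])
assert neighbours["a"] == [["b", "20"], ["c", "18"]]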
| 332
| 1
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> str:
lowerCamelCase__ : Optional[int] = [
'decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(_UpperCAmelCase , _UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> List[str]:
lowerCamelCase__ : Optional[Any] = emb.weight.shape
lowerCamelCase__ : Optional[Any] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( _UpperCAmelCase ) -> Union[str, Any]:
lowerCamelCase__ : Union[str, Any] = torch.load(_UpperCAmelCase , map_location='cpu' )
lowerCamelCase__ : str = Namespace(**checkpoint['cfg']['model'] )
lowerCamelCase__ : Optional[int] = checkpoint['model']
remove_ignore_keys_(_UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = state_dict['decoder.embed_tokens.weight'].shape[0]
lowerCamelCase__ : Any = {key.replace('decoder' , 'model' ): val for key, val in state_dict.items()}
lowerCamelCase__ : int = XGLMConfig(
vocab_size=_UpperCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='gelu' , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , )
lowerCamelCase__ : int = XGLMForCausalLM(_UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase )
print(_UpperCAmelCase )
lowerCamelCase__ : int = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
_UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
_UpperCAmelCase : Any = parser.parse_args()
_UpperCAmelCase : int = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
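# Standalone sketch (added for illustration): make_linear_from_emb above ties the LM
# head to the token embedding by copying the embedding matrix into a bias-free Linear,
# so the output logits are a plain dot product against the embedding rows.
import torch
from torch import nn
_emb = nn.Embedding(10, 4)
_head = nn.Linear(4, 10, bias=False)
_head.weight.data = _emb.weight.data
_x = torch.randn(1, 4)
assert torch.allclose(_head(_x), _x @ _emb.weight.T)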
| 359
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : List[Any] = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : List[str] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
_UpperCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
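# Minimal sketch of the lazy-import idea above (a simplified stand-in, not the real
# _LazyModule): attribute access triggers the underlying import the first time a name
# is touched, so importing the package stays cheap when torch is unavailable.
import importlib
class _TinyLazyModule:
    def __init__(self, attr_to_module):
        self._attr_to_module = attr_to_module
    def __getattr__(self, name):
        # __getattr__ only fires for missing attributes; cache the result so the
        # import happens at most once per name.
        module = importlib.import_module(self._attr_to_module[name])
        value = getattr(module, name)
        setattr(self, name, value)
        return value
_lazy = _TinyLazyModule({"pi": "math"})
assert abs(_lazy.pi - 3.14159) < 1e-3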
| 45
| 0
|
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
_a = logging.get_logger(__name__)
@add_end_docstrings(snake_case__)
class __lowerCamelCase ( snake_case__):
"""simple docstring"""
def __init__( self , **UpperCAmelCase ):
"""simple docstring"""
super().__init__(**UpperCAmelCase )
if self.framework == "tf":
raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" )
requires_backends(self , 'vision' )
self.check_model_type(UpperCAmelCase )
def __call__( self , UpperCAmelCase , UpperCAmelCase = None , **UpperCAmelCase , ):
"""simple docstring"""
if "text_queries" in kwargs:
_UpperCAmelCase = kwargs.pop('text_queries' )
if isinstance(UpperCAmelCase , (str, Image.Image) ):
_UpperCAmelCase = {'image': image, 'candidate_labels': candidate_labels}
else:
_UpperCAmelCase = image
_UpperCAmelCase = super().__call__(UpperCAmelCase , **UpperCAmelCase )
return results
def UpperCamelCase ( self , **UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = {}
if "threshold" in kwargs:
_UpperCAmelCase = kwargs['threshold']
if "top_k" in kwargs:
_UpperCAmelCase = kwargs['top_k']
return {}, {}, postprocess_params
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = load_image(inputs['image'] )
_UpperCAmelCase = inputs['candidate_labels']
if isinstance(UpperCAmelCase , UpperCAmelCase ):
_UpperCAmelCase = candidate_labels.split(',' )
_UpperCAmelCase = torch.tensor([[image.height, image.width]] , dtype=torch.int64 )
for i, candidate_label in enumerate(UpperCAmelCase ):
_UpperCAmelCase = self.tokenizer(UpperCAmelCase , return_tensors=self.framework )
_UpperCAmelCase = self.image_processor(UpperCAmelCase , return_tensors=self.framework )
yield {
"is_last": i == len(UpperCAmelCase ) - 1,
"target_size": target_size,
"candidate_label": candidate_label,
**text_inputs,
**image_features,
}
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = model_inputs.pop('target_size' )
_UpperCAmelCase = model_inputs.pop('candidate_label' )
_UpperCAmelCase = model_inputs.pop('is_last' )
_UpperCAmelCase = self.model(**UpperCAmelCase )
_UpperCAmelCase = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs}
return model_outputs
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase=0.1 , UpperCAmelCase=None ):
"""simple docstring"""
_UpperCAmelCase = []
for model_output in model_outputs:
_UpperCAmelCase = model_output['candidate_label']
_UpperCAmelCase = BaseModelOutput(UpperCAmelCase )
_UpperCAmelCase = self.image_processor.post_process_object_detection(
outputs=UpperCAmelCase , threshold=UpperCAmelCase , target_sizes=model_output['target_size'] )[0]
for index in outputs["scores"].nonzero():
_UpperCAmelCase = outputs['scores'][index].item()
_UpperCAmelCase = self._get_bounding_box(outputs['boxes'][index][0] )
_UpperCAmelCase = {'score': score, 'label': label, 'box': box}
results.append(UpperCAmelCase )
_UpperCAmelCase = sorted(UpperCAmelCase , key=lambda x : x["score"] , reverse=UpperCAmelCase )
if top_k:
_UpperCAmelCase = results[:top_k]
return results
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
if self.framework != "pt":
raise ValueError('The ZeroShotObjectDetectionPipeline is only available in PyTorch.' )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = box.int().tolist()
_UpperCAmelCase = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox
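# Hedged usage sketch (commented out because it downloads a checkpoint; the model name
# below is an assumption, not taken from this file): the pipeline above runs one forward
# pass per candidate label, then post-processes boxes and scores per label.
# from transformers import pipeline
# detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
# preds = detector("street.jpg", candidate_labels=["car", "person", "traffic light"])
# # each prediction: {"score": float, "label": str,
# #                   "box": {"xmin": int, "ymin": int, "xmax": int, "ymax": int}}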
| 39
|
class lowercase_ :
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ->Any:
lowerCAmelCase = name
lowerCAmelCase = value
lowerCAmelCase = weight
def __repr__( self ) ->str:
return F"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return self.value
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return self.name
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
return self.weight
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
return self.value / self.weight
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> int:
lowerCAmelCase = []
for i in range(len(snake_case__ ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ ) -> Optional[int]:
lowerCAmelCase = sorted(snake_case__ , key=snake_case__ , reverse=snake_case__ )
lowerCAmelCase = []
lowerCAmelCase , lowerCAmelCase = 0.0, 0.0
for i in range(len(snake_case__ ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def SCREAMING_SNAKE_CASE_ ( ) -> Optional[int]:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
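# Worked example (added; a direct re-implementation of the greedy loop above, since the
# obfuscated helper names shadow each other): sort items by value-per-weight density,
# then take each item whose weight still fits the remaining budget.
_items = [("burger", 80, 10), ("pizza", 100, 12), ("salad", 30, 4)]
_budget = 15
_taken, _total_value, _total_weight = [], 0, 0
for _name, _value, _weight in sorted(_items, key=lambda it: it[1] / it[2], reverse=True):
    if _total_weight + _weight <= _budget:
        _taken.append(_name)
        _total_weight += _weight
        _total_value += _value
# densities: pizza ~8.33, burger 8.0, salad 7.5 -> pizza (weight 12) fills the budget
assert (_taken, _total_value) == (["pizza"], 100)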
| 338
| 0
|
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class a :
'''simple docstring'''
def __init__( self : int , __snake_case : Any , __snake_case : Dict=2 , __snake_case : List[Any]=True , __snake_case : Union[str, Any]=False , __snake_case : Union[str, Any]=10 , __snake_case : List[str]=3 , __snake_case : Optional[Any]=32 * 8 , __snake_case : Tuple=32 * 8 , __snake_case : List[str]=4 , __snake_case : int=64 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_size
UpperCAmelCase_ = max_size
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = hidden_dim
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
__snake_case )
UpperCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=__snake_case )
UpperCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=__snake_case ) > 0.5
).float()
UpperCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=__snake_case ) > 0.5).long()
UpperCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowerCamelCase_ ( self : List[str] ):
UpperCAmelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCAmelCase_ = self.num_queries
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = [1, 1, 1, 1]
UpperCAmelCase_ = self.num_channels
UpperCAmelCase_ = 64
UpperCAmelCase_ = 1_28
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
return config
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowerCamelCase_ ( self : str , __snake_case : str , __snake_case : Optional[Any] ):
UpperCAmelCase_ = output.encoder_hidden_states
UpperCAmelCase_ = output.pixel_decoder_hidden_states
UpperCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(__snake_case ) , config.decoder_layers )
def lowerCamelCase_ ( self : Any , __snake_case : Union[str, Any] , __snake_case : List[Any] , __snake_case : Optional[Any] , __snake_case : Optional[int]=False ):
with torch.no_grad():
UpperCAmelCase_ = MaskaFormerModel(config=__snake_case )
model.to(__snake_case )
model.eval()
UpperCAmelCase_ = model(pixel_values=__snake_case , pixel_mask=__snake_case )
UpperCAmelCase_ = model(__snake_case , output_hidden_states=__snake_case )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(__snake_case , __snake_case )
def lowerCamelCase_ ( self : Union[str, Any] , __snake_case : Union[str, Any] , __snake_case : Optional[Any] , __snake_case : Any , __snake_case : Tuple , __snake_case : Optional[Any] ):
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(config=__snake_case )
model.to(__snake_case )
model.eval()
def comm_check_on_output(__snake_case : Tuple ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_ = model(pixel_values=__snake_case , pixel_mask=__snake_case )
UpperCAmelCase_ = model(__snake_case )
comm_check_on_output(__snake_case )
UpperCAmelCase_ = model(
pixel_values=__snake_case , pixel_mask=__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
comm_check_on_output(__snake_case )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class a ( _A , _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCAmelCase : Union[str, Any] = {'feature-extraction': MaskaFormerModel} if is_torch_available() else {}
lowerCAmelCase : List[str] = False
lowerCAmelCase : Union[str, Any] = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : Tuple = False
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ = MaskaFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case )
def lowerCamelCase_ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def lowerCamelCase_ ( self : Tuple ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*__snake_case )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowerCamelCase_ ( self : Optional[int] ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowerCamelCase_ ( self : Tuple ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowerCamelCase_ ( self : List[Any] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowerCamelCase_ ( self : Dict ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowerCamelCase_ ( self : Any ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowerCamelCase_ ( self : str ):
pass
def lowerCamelCase_ ( self : Optional[int] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__snake_case )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
@slow
def lowerCamelCase_ ( self : Optional[Any] ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = (self.model_tester.min_size,) * 2
UpperCAmelCase_ = {
'''pixel_values''': torch.randn((2, 3, *size) , device=__snake_case ),
'''mask_labels''': torch.randn((2, 10, *size) , device=__snake_case ),
'''class_labels''': torch.zeros(2 , 10 , device=__snake_case ).long(),
}
UpperCAmelCase_ = self.model_tester.get_config()
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(__snake_case ).to(__snake_case )
UpperCAmelCase_ = model(**__snake_case )
self.assertTrue(outputs.loss is not None )
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(__snake_case , **__snake_case , output_hidden_states=__snake_case )
def lowerCamelCase_ ( self : Union[str, Any] ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(__snake_case ).to(__snake_case )
UpperCAmelCase_ = model(**__snake_case , output_attentions=__snake_case )
self.assertTrue(outputs.attentions is not None )
def lowerCamelCase_ ( self : Optional[Any] ):
if not self.model_tester.is_training:
return
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = model_class(__snake_case )
model.to(__snake_case )
model.train()
UpperCAmelCase_ = model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case ).loss
loss.backward()
def lowerCamelCase_ ( self : Any ):
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(__snake_case ).to(__snake_case )
model.train()
UpperCAmelCase_ = model(__snake_case , mask_labels=__snake_case , class_labels=__snake_case )
UpperCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=__snake_case )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowerCamelCase = 1e-4
def SCREAMING_SNAKE_CASE ( ) -> Dict:
UpperCAmelCase_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self : int ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowerCamelCase_ ( self : Optional[Any] ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(__snake_case )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
UpperCAmelCase_ = model(**__snake_case )
UpperCAmelCase_ = torch.tensor(
[[-0.2_790, -1.0_717, -1.1_668], [-0.5_128, -0.3_128, -0.4_987], [-0.5_832, 0.1_971, -0.0_197]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
UpperCAmelCase_ = torch.tensor(
[[0.8_973, 1.1_847, 1.1_776], [1.1_934, 1.5_040, 1.5_128], [1.1_153, 1.4_486, 1.4_951]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
UpperCAmelCase_ = torch.tensor(
[[2.1_152, 1.7_000, -0.8_603], [1.5_808, 1.8_004, -0.9_353], [1.6_043, 1.7_495, -0.5_999]] ).to(__snake_case )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , __snake_case , atol=__snake_case ) )
def lowerCamelCase_ ( self : List[Any] ):
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(__snake_case , return_tensors='''pt''' ).to(__snake_case )
UpperCAmelCase_ = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(__snake_case , (1, 3, 3_84, 3_84) )
with torch.no_grad():
UpperCAmelCase_ = model(**__snake_case )
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_ = [
[-8.7_839, -9.0_056, -8.8_121],
[-7.4_104, -7.0_313, -6.5_401],
[-6.6_105, -6.3_427, -6.4_675],
]
UpperCAmelCase_ = torch.tensor(__snake_case ).to(__snake_case )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , __snake_case , atol=__snake_case ) )
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ = torch.tensor(
[
[1.8_324, -8.0_835, -4.1_922],
[0.8_450, -9.0_050, -3.6_053],
[0.3_045, -7.7_293, -3.0_275],
] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , __snake_case , atol=__snake_case ) )
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(__snake_case ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors='''pt''' , )
UpperCAmelCase_ = inputs['''pixel_values'''].to(__snake_case )
UpperCAmelCase_ = [el.to(__snake_case ) for el in inputs['''mask_labels''']]
UpperCAmelCase_ = [el.to(__snake_case ) for el in inputs['''class_labels''']]
with torch.no_grad():
UpperCAmelCase_ = model(**__snake_case )
self.assertTrue(outputs.loss is not None )
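# Minimal sketch (added) of the gradient-retention check used in the tests above, with
# toy tensors instead of a model: retain_grad() on an intermediate activation lets the
# test assert that gradients actually flow through every block after loss.backward().
import torch
_x = torch.randn(3, requires_grad=True)
_hidden = _x * 2
_hidden.retain_grad()  # intermediate tensors drop .grad unless retained
_loss = _hidden.sum()
_loss.backward()
assert _hidden.grad is not None and _x.grad is not None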
| 355
|
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_lowerCamelCase = logging.get_logger(__name__)
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : str = ['input_values', 'padding_mask']
def __init__( self : Optional[Any] , __snake_case : int = 1 , __snake_case : int = 2_40_00 , __snake_case : float = 0.0 , __snake_case : float = None , __snake_case : float = None , **__snake_case : Dict , ):
super().__init__(feature_size=__snake_case , sampling_rate=__snake_case , padding_value=__snake_case , **__snake_case )
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = overlap
@property
def lowerCamelCase_ ( self : List[str] ):
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def lowerCamelCase_ ( self : List[str] ):
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : List[str] , __snake_case : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __snake_case : Optional[Union[bool, str, PaddingStrategy]] = None , __snake_case : Optional[bool] = False , __snake_case : Optional[int] = None , __snake_case : Optional[Union[str, TensorType]] = None , __snake_case : Optional[int] = None , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
UpperCAmelCase_ = True
UpperCAmelCase_ = bool(
isinstance(__snake_case , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
UpperCAmelCase_ = [np.asarray(__snake_case , dtype=np.float32 ).T for audio in raw_audio]
elif not is_batched and not isinstance(__snake_case , np.ndarray ):
UpperCAmelCase_ = np.asarray(__snake_case , dtype=np.float32 )
elif isinstance(__snake_case , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
UpperCAmelCase_ = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
UpperCAmelCase_ = [np.asarray(__snake_case ).T]
# verify inputs are valid
for idx, example in enumerate(__snake_case ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
UpperCAmelCase_ = None
UpperCAmelCase_ = BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
UpperCAmelCase_ = min(array.shape[0] for array in raw_audio )
UpperCAmelCase_ = int(np.floor(max_length / self.chunk_stride ) )
UpperCAmelCase_ = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
UpperCAmelCase_ = max(array.shape[0] for array in raw_audio )
UpperCAmelCase_ = int(np.ceil(max_length / self.chunk_stride ) )
UpperCAmelCase_ = (nb_step - 1) * self.chunk_stride + self.chunk_length
UpperCAmelCase_ = '''max_length'''
else:
UpperCAmelCase_ = input_values
# normal padding on batch
if padded_inputs is None:
UpperCAmelCase_ = self.pad(
__snake_case , max_length=__snake_case , truncation=__snake_case , padding=__snake_case , return_attention_mask=__snake_case , )
if padding:
UpperCAmelCase_ = padded_inputs.pop('''attention_mask''' )
UpperCAmelCase_ = []
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
UpperCAmelCase_ = example[..., None]
input_values.append(example.T )
UpperCAmelCase_ = input_values
if return_tensors is not None:
UpperCAmelCase_ = padded_inputs.convert_to_tensors(__snake_case )
return padded_inputs
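# Worked example (added for illustration): the chunk_length and chunk_stride properties
# above derive from chunk_length_s, sampling_rate and overlap. E.g. 1-second chunks at
# 24 kHz with 50% overlap:
_chunk_length_s, _sampling_rate, _overlap = 1.0, 24_000, 0.5
_chunk_length = int(_chunk_length_s * _sampling_rate)
_chunk_stride = max(1, int((1.0 - _overlap) * _chunk_length))
assert (_chunk_length, _chunk_stride) == (24_000, 12_000)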
| 177
| 0
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
__a = None
__a = logging.get_logger(__name__)
__a = "▁"
__a = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
__a = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
__a = {
"google/pegasus-xsum": 5_12,
}
class lowerCamelCase ( _lowerCAmelCase ):
'''simple docstring'''
_A : Tuple = VOCAB_FILES_NAMES
_A : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_A : Optional[Any] = PegasusTokenizer
_A : Optional[int] = ["""input_ids""", """attention_mask"""]
def __init__( self: List[str] , snake_case: Dict=None , snake_case: Optional[Any]=None , snake_case: List[str]="<pad>" , snake_case: List[str]="</s>" , snake_case: Optional[int]="<unk>" , snake_case: Optional[int]="<mask_2>" , snake_case: Tuple="<mask_1>" , snake_case: Optional[int]=None , snake_case: List[str]=103 , **snake_case: Tuple , ) -> int:
snake_case_ :Dict = offset
if additional_special_tokens is not None:
if not isinstance(snake_case , snake_case ):
raise TypeError(
f"""additional_special_tokens should be of type {type(snake_case )}, but is"""
f""" {type(snake_case )}""" )
snake_case_ :str = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(snake_case ) , self.offset - 1 )
]
if len(set(snake_case ) ) != len(snake_case ):
raise ValueError(
"""Please make sure that the provided additional_special_tokens do not contain an incorrectly"""
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
snake_case_ :Tuple = additional_special_tokens_extended
else:
snake_case_ :Dict = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
snake_case , tokenizer_file=snake_case , pad_token=snake_case , eos_token=snake_case , unk_token=snake_case , mask_token=snake_case , mask_token_sent=snake_case , offset=snake_case , additional_special_tokens=snake_case , **snake_case , )
snake_case_ :Optional[int] = vocab_file
snake_case_ :str = False if not self.vocab_file else True
def lowerCAmelCase_ ( self: List[str] , snake_case: str ) -> List[Any]:
snake_case_ :Optional[Any] = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
"""There should be 3 special tokens: mask_token, pad_token, and eos_token +"""
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: List , snake_case: Optional[List] = None , snake_case: bool = False ) -> List[int]:
if already_has_special_tokens:
return self._special_token_mask(snake_case )
elif token_ids_a is None:
return self._special_token_mask(snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowerCAmelCase_ ( self: Any , snake_case: List[Any] , snake_case: List[str]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowerCAmelCase_ ( self: str , snake_case: str , snake_case: Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(snake_case ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
snake_case_ :Optional[int] = os.path.join(
snake_case , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case ):
copyfile(self.vocab_file , snake_case )
return (out_vocab_file,)
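# Toy sketch (added; the ids below are made up, not the real Pegasus vocab): the
# _special_token_mask logic above boils down to flagging every id in the special set,
# and get_special_tokens_mask appends a trailing 1 for the EOS that
# build_inputs_with_special_tokens adds.
_special_ids = {0, 1, 2}  # e.g. pad, eos, mask tokens
_seq = [5, 0, 9, 1]
_mask = [1 if tok in _special_ids else 0 for tok in _seq]
assert _mask == [0, 1, 0, 1]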
| 66
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :int = XCLIPTextConfig()
# derive patch size from model name
snake_case_ :Union[str, Any] = model_name.find("""patch""" )
snake_case_ :List[str] = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
snake_case_ :Any = XCLIPVisionConfig(patch_size=_lowercase, num_frames=_lowercase )
if "large" in model_name:
snake_case_ :Optional[Any] = 768
snake_case_ :Union[str, Any] = 3072
snake_case_ :Any = 12
snake_case_ :Any = 1024
snake_case_ :str = 4096
snake_case_ :Union[str, Any] = 16
snake_case_ :Union[str, Any] = 24
snake_case_ :Tuple = 768
snake_case_ :Any = 3072
if model_name == "xclip-large-patch14-16-frames":
snake_case_ :Any = 336
snake_case_ :Any = XCLIPConfig.from_text_vision_configs(_lowercase, _lowercase )
if "large" in model_name:
snake_case_ :List[Any] = 768
return config
def A_ ( _lowercase ):
'''simple docstring'''
if name == "token_embedding.weight":
snake_case_ :Optional[Any] = name.replace("""token_embedding.weight""", """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
snake_case_ :Tuple = name.replace("""positional_embedding""", """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
snake_case_ :Dict = name.replace("""ln_1""", """layer_norm1""" )
if "ln_2" in name:
snake_case_ :str = name.replace("""ln_2""", """layer_norm2""" )
if "c_fc" in name:
snake_case_ :str = name.replace("""c_fc""", """fc1""" )
if "c_proj" in name:
snake_case_ :int = name.replace("""c_proj""", """fc2""" )
if name.startswith("""transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""transformer.resblocks""", """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
snake_case_ :Union[str, Any] = name.replace("""attn.out_proj""", """self_attn.out_proj""" )
if "ln_final" in name:
snake_case_ :Union[str, Any] = name.replace("""ln_final""", """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
snake_case_ :Any = name.replace("""visual.class_embedding""", """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
snake_case_ :Optional[int] = name.replace("""visual.positional_embedding""", """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
snake_case_ :Union[str, Any] = name.replace("""visual.transformer.resblocks""", """vision_model.encoder.layers""" )
if "visual.conv1" in name:
snake_case_ :int = name.replace("""visual.conv1""", """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
snake_case_ :Any = name.replace("""visual.ln_pre""", """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
snake_case_ :str = name.replace("""visual.ln_post""", """vision_model.post_layernorm""" )
if "visual.proj" in name:
snake_case_ :Union[str, Any] = name.replace("""visual.proj""", """visual_projection.weight""" )
if "text_projection" in name:
snake_case_ :Dict = name.replace("""text_projection""", """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
snake_case_ :List[str] = name.replace("""prompts_visual_proj""", """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
snake_case_ :Dict = name.replace("""prompts_visual_ln""", """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
snake_case_ :str = name.replace("""positional""", """position""" )
if name.startswith("""mit.resblocks""" ):
snake_case_ :Dict = name.replace("""mit.resblocks""", """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
snake_case_ :Union[str, Any] = name.replace("""prompts_generator.norm""", """prompts_generator.layernorm""" )
return name
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case_ :Dict = orig_state_dict.pop(_lowercase )
if "attn.in_proj" in key:
snake_case_ :Optional[Any] = key.split(""".""" )
if key.startswith("""visual""" ):
snake_case_ :Any = key_split[3]
snake_case_ :Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
snake_case_ :str = val[
:dim, :
]
snake_case_ :Optional[int] = val[
dim : dim * 2, :
]
snake_case_ :Union[str, Any] = val[
-dim:, :
]
else:
snake_case_ :Dict = val[
:dim
]
snake_case_ :Optional[int] = val[
dim : dim * 2
]
snake_case_ :Optional[int] = val[
-dim:
]
else:
if "weight" in key:
snake_case_ :Optional[Any] = val[
:dim, :
]
snake_case_ :List[str] = val[
dim : dim * 2, :
]
snake_case_ :Dict = val[
-dim:, :
]
else:
snake_case_ :Union[str, Any] = val[:dim]
snake_case_ :Union[str, Any] = val[
dim : dim * 2
]
snake_case_ :Union[str, Any] = val[-dim:]
elif key.startswith("""mit""" ):
snake_case_ :Tuple = key_split[2]
snake_case_ :Union[str, Any] = config.vision_config.mit_hidden_size
if "weight" in key:
snake_case_ :Optional[int] = val[:dim, :]
snake_case_ :Optional[int] = val[dim : dim * 2, :]
snake_case_ :str = val[-dim:, :]
else:
snake_case_ :str = val[:dim]
snake_case_ :Any = val[dim : dim * 2]
snake_case_ :int = val[-dim:]
else:
snake_case_ :Tuple = key_split[2]
snake_case_ :Any = config.text_config.hidden_size
if "weight" in key:
snake_case_ :Dict = val[:dim, :]
snake_case_ :Dict = val[
dim : dim * 2, :
]
snake_case_ :List[str] = val[-dim:, :]
else:
snake_case_ :Any = val[:dim]
snake_case_ :Tuple = val[
dim : dim * 2
]
snake_case_ :List[str] = val[-dim:]
else:
snake_case_ :Optional[int] = rename_key(_lowercase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
snake_case_ :Optional[Any] = val.T
snake_case_ :Tuple = val
return orig_state_dict
def A_ ( _lowercase ):
'''simple docstring'''
if num_frames == 8:
snake_case_ :str = """eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
snake_case_ :int = """eating_spaghetti.npy"""
elif num_frames == 32:
snake_case_ :List[str] = """eating_spaghetti_32_frames.npy"""
snake_case_ :int = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""", filename=_lowercase, repo_type="""dataset""", )
snake_case_ :Union[str, Any] = np.load(_lowercase )
return list(_lowercase )
def A_ ( _lowercase, _lowercase=None, _lowercase=False ):
'''simple docstring'''
snake_case_ :List[Any] = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
snake_case_ :Optional[int] = model_to_url[model_name]
snake_case_ :int = 8
if "16-frames" in model_name:
snake_case_ :List[Any] = 16
elif "shot" in model_name:
snake_case_ :Dict = 32
snake_case_ :Optional[int] = get_xclip_config(_lowercase, _lowercase )
snake_case_ :Optional[Any] = XCLIPModel(_lowercase )
model.eval()
if "drive" in checkpoint_url:
snake_case_ :List[str] = """pytorch_model.bin"""
gdown.cached_download(_lowercase, _lowercase, quiet=_lowercase )
snake_case_ :List[Any] = torch.load(_lowercase, map_location="""cpu""" )["""model"""]
else:
snake_case_ :Tuple = torch.hub.load_state_dict_from_url(_lowercase )["""model"""]
snake_case_ :Union[str, Any] = convert_state_dict(_lowercase, _lowercase )
snake_case_ :str = XCLIPModel(_lowercase )
snake_case_, snake_case_ = model.load_state_dict(_lowercase, strict=_lowercase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
snake_case_ :List[str] = 336 if model_name == """xclip-large-patch14-16-frames""" else 224
snake_case_ :List[Any] = VideoMAEImageProcessor(size=_lowercase )
snake_case_ :Any = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ :str = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
snake_case_ :Optional[Any] = XCLIPProcessor(image_processor=_lowercase, tokenizer=_lowercase )
snake_case_ :Optional[int] = prepare_video(_lowercase )
snake_case_ :Optional[Any] = processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""], videos=_lowercase, return_tensors="""pt""", padding=_lowercase )
print("""Shape of pixel values:""", inputs.pixel_values.shape )
with torch.no_grad():
snake_case_ :List[Any] = model(**_lowercase )
# Verify outputs
snake_case_ :List[Any] = outputs.logits_per_video
snake_case_ :Any = logits_per_video.softmax(dim=1 )
print("""Probs:""", _lowercase )
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(_lowercase, organization="""nielsr""" )
processor.push_to_hub(_lowercase, organization="""nielsr""" )
slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
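# Illustrative invocation (a sketch, not part of the original script; the file
# name and output directory are hypothetical):
#
#     python convert_x_clip_original_pytorch_to_hf.py \
#         --model_name xclip-base-patch32 \
#         --pytorch_dump_folder_path ./xclip-base-patch32
#
# The script downloads the research checkpoint, remaps its state dict into the
# Transformers layout, verifies the softmaxed video/text logits against the
# hard-coded expected values above, and only then saves (and optionally pushes)
# the converted model.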
| 66
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : List[str] = {
'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class NatConfig( BackboneConfigMixin , PretrainedConfig ):
    model_type = """nat"""
    attribute_map = {
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , patch_size=4 , num_channels=3 , embed_dim=64 , depths=[3, 4, 6, 5] , num_heads=[2, 4, 8, 16] , kernel_size=7 , mlp_ratio=3.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , initializer_range=0.02 , layer_norm_eps=1E-5 , layer_scale_init_value=0.0 , out_features=None , out_indices=None , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths )
        self.num_heads = num_heads
        self.kernel_size = kernel_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths ) - 1) )
        self.layer_scale_init_value = layer_scale_init_value
        self.stage_names = ['''stem'''] + [f"""stage{idx}""" for idx in range(1 , len(depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
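# Illustrative sketch (not part of the original file): with the defaults above,
# embed_dim=64 and depths=[3, 4, 6, 5] give 4 stages, so the derived channel
# width after the last stage is hidden_size = int(64 * 2 ** (4 - 1)) = 512:
#
#     config = NatConfig()
#     assert config.hidden_size == 512 and config.num_layers == 4
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]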
| 258
|
from typing import Union
import fire
import torch
from tqdm import tqdm
def A ( _lowercase , _lowercase = "cpu" , _lowercase = None ):
SCREAMING_SNAKE_CASE : Optional[int] = torch.load(_lowercase , map_location=_lowercase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowercase , torch.Tensor ):
raise TypeError('''FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin''' )
SCREAMING_SNAKE_CASE : List[Any] = v.half()
if save_path is None: # overwrite src_path
SCREAMING_SNAKE_CASE : str = src_path
torch.save(_lowercase , _lowercase )
if __name__ == "__main__":
fire.Fire(convert)
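# Example usage (a sketch, not part of the original file; the file and output
# paths are hypothetical). fire.Fire(convert) maps the signature of `convert`
# directly onto the command line:
#
#     python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin
#
# Omitting --save_path halves the weights in place, overwriting src_path.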
| 258
| 1
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType('DataClass', Any)
DataClassType = NewType('DataClassType', Any)
def string_to_bool( v ):
    """simple docstring"""
    if isinstance(v , bool ):
        return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" )
def make_choice_type_function( choices ):
    """simple docstring"""
    str_to_choice = {str(choice ): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg , arg )
def HfArg( *,
    aliases = None , help = None , default = dataclasses.MISSING , default_factory = dataclasses.MISSING , metadata = None , **kwargs , ):
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata['aliases'] = aliases
    if help is not None:
        metadata['help'] = help
    return dataclasses.field(metadata=metadata , default=default , default_factory=default_factory , **kwargs )
class HfArgumentParser( ArgumentParser ):
    '''simple docstring'''
    dataclass_types: Iterable[DataClassType]
    def __init__( self , dataclass_types , **kwargs ):
        """simple docstring"""
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs )
        if dataclasses.is_dataclass(dataclass_types ):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types )
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype )
@staticmethod
    def _parse_dataclass_field( parser , field ):
        """simple docstring"""
        field_name = F"""--{field.name}"""
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type , str ):
            raise RuntimeError(
                'Unresolved type detected, which should have been done with the help of '
                '`typing.get_type_hints` method by default' )
        aliases = kwargs.pop('aliases' , [] )
        if isinstance(aliases , str ):
            aliases = [aliases]
        origin_type = getattr(field.type , '__origin__' , field.type )
        if origin_type is Union or (hasattr(types , 'UnionType' ) and isinstance(field.type , types.UnionType )):
            if str not in field.type.__args__ and (
                len(field.type.__args__ ) != 2 or type(None ) not in field.type.__args__
            ):
                raise ValueError(
                    'Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because'
                    ' the argument parser only supports one type per argument.'
                    F""" Problem encountered in field '{field.name}'.""" )
            if type(None ) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type , '__origin__' , field.type )
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None , field.type.__args__[1] ) else field.type.__args__[1]
                )
                origin_type = getattr(field.type , '__origin__' , field.type )
        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type , type ) and issubclass(field.type , Enum )):
            if origin_type is Literal:
                kwargs['choices'] = field.type.__args__
            else:
                kwargs['choices'] = [x.value for x in field.type]
            kwargs['type'] = make_choice_type_function(kwargs['choices'] )
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            else:
                kwargs['required'] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the correct kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs )
            # Hack because type=bool in argparse does not behave as we want.
            kwargs['type'] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs['default'] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs['nargs'] = '?'
                # This is the value that will get picked if we do --field_name (without value)
                kwargs['const'] = True
        elif isclass(origin_type ) and issubclass(origin_type , list ):
            kwargs['type'] = field.type.__args__[0]
            kwargs['nargs'] = '+'
            if field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs['required'] = True
        else:
            kwargs['type'] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs['default'] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs['default'] = field.default_factory()
            else:
                kwargs['required'] = True
        parser.add_argument(field_name , *aliases , **kwargs )
        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs['default'] = False
            parser.add_argument(F"""--no_{field.name}""" , action='store_false' , dest=field.name , **bool_kwargs )
    def _add_dataclass_arguments( self , dtype ):
        """simple docstring"""
        if hasattr(dtype , '_argument_group_name' ):
            parser = self.add_argument_group(dtype._argument_group_name )
        else:
            parser = self
        try:
            type_hints: Dict[str, type] = get_type_hints(dtype )
        except NameError:
            raise RuntimeError(
                F"""Type resolution failed for {dtype}. Try declaring the class in global scope or """
                'removing line of `from __future__ import annotations` which opts in Postponed '
                'Evaluation of Annotations (PEP 563)' )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex ):
                python_version = '.'.join(map(str , sys.version_info[:3] ) )
                raise RuntimeError(
                    F"""Type resolution failed for {dtype} on Python {python_version}. Try removing """
                    'line of `from __future__ import annotations` which opts in union types as '
                    '`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To '
                    'support Python versions that lower than 3.10, you need to use '
                    '`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of '
                    '`X | None`.' ) from ex
            raise
        for field in dataclasses.fields(dtype ):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser , field )
    def parse_args_into_dataclasses( self , args=None , return_remaining_strings=False , look_for_args_file=True , args_filename=None , args_file_flag=None , ):
        """simple docstring"""
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv )):
            args_files = []
            if args_filename:
                args_files.append(Path(args_filename ) )
            elif look_for_args_file and len(sys.argv ):
                args_files.append(Path(sys.argv[0] ).with_suffix('.args' ) )
            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag , type=str , action='append' )
                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args )
                cmd_args_file_paths = vars(cfg ).get(args_file_flag.lstrip('-' ) , None )
                if cmd_args_file_paths:
                    args_files.extend([Path(p ) for p in cmd_args_file_paths] )
            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()
            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in vars(namespace ).items() if k in keys}
            for k in keys:
                delattr(namespace , k )
            obj = dtype(**inputs )
            outputs.append(obj )
        if len(namespace.__dict__ ) > 0:
            # additional namespace.
            outputs.append(namespace )
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(F"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""" )
            return (*outputs,)
    def parse_dict( self , args , allow_extra_keys = False ):
        """simple docstring"""
        unused_keys = set(args.keys() )
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype ) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys() )
            obj = dtype(**inputs )
            outputs.append(obj )
        if not allow_extra_keys and unused_keys:
            raise ValueError(F"""Some keys are not used by the HfArgumentParser: {sorted(unused_keys )}""" )
        return tuple(outputs )
    def parse_json_file( self , json_file , allow_extra_keys = False ):
        """simple docstring"""
        with open(Path(json_file ) , encoding='utf-8' ) as open_json_file:
            data = json.loads(open_json_file.read() )
        outputs = self.parse_dict(data , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
    def parse_yaml_file( self , yaml_file , allow_extra_keys = False ):
        """simple docstring"""
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file ).read_text() ) , allow_extra_keys=allow_extra_keys )
        return tuple(outputs )
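# Illustrative usage (a sketch, not part of the original file): each dataclass
# field becomes a CLI flag, and parse_args_into_dataclasses returns instances.
#
#     @dataclasses.dataclass
#     class TrainingArgs:
#         learning_rate: float = 5e-5
#         batch_size: int = 8
#         fp16: bool = False
#
#     parser = HfArgumentParser(TrainingArgs)
#     (training_args,) = parser.parse_args_into_dataclasses(
#         args=["--learning_rate", "3e-4", "--batch_size", "16"]
#     )
#     assert training_args.learning_rate == 3e-4 and training_args.batch_size == 16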
| 286
|
"""simple docstring"""
import inspect
import jax
import jax.lax as lax
import jax.numpy as jnp
from ..utils import add_start_docstrings
from ..utils.logging import get_logger
logger = get_logger(__name__)
LOGITS_PROCESSOR_INPUTS_DOCSTRING = r'\n    Args:\n        input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n            search or log softmax for each vocabulary token when using beam search\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional logits processor specific kwargs.\n\n    Return:\n        `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n'
class FlaxLogitsProcessor:
    '''simple docstring'''
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ):
        """simple docstring"""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class FlaxLogitsWarper:
    '''simple docstring'''
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores ):
        """simple docstring"""
        raise NotImplementedError(
            F"""{self.__class__} is an abstract class. Only classes inheriting this class can be called.""" )
class FlaxLogitsProcessorList(list ):
    '''simple docstring'''
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING )
    def __call__( self , input_ids , scores , cur_len , **kwargs ):
        """simple docstring"""
        for processor in self:
            function_args = inspect.signature(processor.__call__ ).parameters
            if len(function_args ) > 3:
                if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ):
                    raise ValueError(
                        F"""Make sure that all the required parameters: {list(function_args.keys() )} for """
                        F"""{processor.__class__} are passed to the logits processor.""" )
                scores = processor(input_ids , scores , cur_len , **kwargs )
            else:
                scores = processor(input_ids , scores , cur_len )
        return scores
class FlaxTemperatureLogitsWarper(FlaxLogitsWarper ):
    '''simple docstring'''
    def __init__( self , temperature ):
        """simple docstring"""
        if not isinstance(temperature , float ) or not (temperature > 0):
            raise ValueError(F"""`temperature` has to be a strictly positive float, but is {temperature}""" )
        self.temperature = temperature
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        scores = scores / self.temperature
        return scores
class FlaxTopPLogitsWarper(FlaxLogitsWarper ):
    '''simple docstring'''
    def __init__( self , top_p , filter_value = -float('Inf' ) , min_tokens_to_keep = 1 ):
        """simple docstring"""
        if not isinstance(top_p , float ) or (top_p < 0 or top_p > 1.0):
            raise ValueError(F"""`top_p` has to be a float > 0 and < 1, but is {top_p}""" )
        if not isinstance(min_tokens_to_keep , int ) or (min_tokens_to_keep < 1):
            raise ValueError(F"""`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}""" )
        self.top_p = top_p
        self.filter_value = filter_value
        self.min_tokens_to_keep = min_tokens_to_keep
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        topk_scores, topk_indices = lax.top_k(scores , scores.shape[-1] )
        mask_scores = jnp.full_like(scores , self.filter_value )
        cumulative_probs = jax.nn.softmax(topk_scores , axis=-1 ).cumsum(axis=-1 )
        score_mask = cumulative_probs < self.top_p
        # include the token that is higher than top_p as well
        score_mask = jnp.roll(score_mask , 1 )
        score_mask |= score_mask.at[:, 0].set(True )
        # min tokens to keep
        score_mask = score_mask.at[:, : self.min_tokens_to_keep].set(True )
        topk_next_scores = jnp.where(score_mask , topk_scores , mask_scores )
        next_scores = jax.lax.sort_key_val(topk_indices , topk_next_scores )[-1]
        return next_scores
class FlaxTopKLogitsWarper(FlaxLogitsWarper ):
    '''simple docstring'''
    def __init__( self , top_k , filter_value = -float('Inf' ) , min_tokens_to_keep = 1 ):
        """simple docstring"""
        if not isinstance(top_k , int ) or top_k <= 0:
            raise ValueError(F"""`top_k` has to be a strictly positive integer, but is {top_k}""" )
        self.top_k = max(top_k , min_tokens_to_keep )
        self.filter_value = filter_value
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        batch_size, vocab_size = scores.shape
        next_scores_flat = jnp.full(batch_size * vocab_size , self.filter_value )
        topk = min(self.top_k , scores.shape[-1] )  # Safety check
        topk_scores, topk_indices = lax.top_k(scores , topk )
        shift = jnp.broadcast_to((jnp.arange(batch_size ) * vocab_size)[:, None] , (batch_size, topk) ).flatten()
        topk_scores_flat = topk_scores.flatten()
        topk_indices_flat = topk_indices.flatten() + shift
        next_scores_flat = next_scores_flat.at[topk_indices_flat].set(topk_scores_flat )
        next_scores = next_scores_flat.reshape(batch_size , vocab_size )
        return next_scores
class FlaxForcedBOSTokenLogitsProcessor(FlaxLogitsProcessor ):
    '''simple docstring'''
    def __init__( self , bos_token_id ):
        """simple docstring"""
        self.bos_token_id = bos_token_id
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        apply_penalty = 1 - jnp.bool_(cur_len - 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.bos_token_id].set(0 ) , scores )
        return scores
class FlaxForcedEOSTokenLogitsProcessor(FlaxLogitsProcessor ):
    '''simple docstring'''
    def __init__( self , max_length , eos_token_id ):
        """simple docstring"""
        self.max_length = max_length
        self.eos_token_id = eos_token_id
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        new_scores = jnp.full(scores.shape , -float('inf' ) )
        apply_penalty = 1 - jnp.bool_(cur_len - self.max_length + 1 )
        scores = jnp.where(apply_penalty , new_scores.at[:, self.eos_token_id].set(0 ) , scores )
        return scores
class FlaxMinLengthLogitsProcessor(FlaxLogitsProcessor ):
    '''simple docstring'''
    def __init__( self , min_length , eos_token_id ):
        """simple docstring"""
        if not isinstance(min_length , int ) or min_length < 0:
            raise ValueError(F"""`min_length` has to be a positive integer, but is {min_length}""" )
        if not isinstance(eos_token_id , int ) or eos_token_id < 0:
            raise ValueError(F"""`eos_token_id` has to be a positive integer, but is {eos_token_id}""" )
        self.min_length = min_length
        self.eos_token_id = eos_token_id
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        # create boolean flag to decide if min length penalty should be applied
        apply_penalty = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 )
        scores = jnp.where(apply_penalty , scores.at[:, self.eos_token_id].set(-float('inf' ) ) , scores )
        return scores
class FlaxSuppressTokensAtBeginLogitsProcessor(FlaxLogitsProcessor ):
    '''simple docstring'''
    def __init__( self , begin_suppress_tokens , begin_index ):
        """simple docstring"""
        self.begin_suppress_tokens = list(begin_suppress_tokens )
        self.begin_index = begin_index
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        apply_penalty = 1 - jnp.bool_(cur_len - self.begin_index )
        scores = jnp.where(apply_penalty , scores.at[:, self.begin_suppress_tokens].set(-float('inf' ) ) , scores )
        return scores
class FlaxSuppressTokensLogitsProcessor(FlaxLogitsProcessor ):
    '''simple docstring'''
    def __init__( self , suppress_tokens ):
        """simple docstring"""
        self.suppress_tokens = list(suppress_tokens )
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        scores = scores.at[..., self.suppress_tokens].set(-float('inf' ) )
        return scores
class FlaxForceTokensLogitsProcessor(FlaxLogitsProcessor ):
    '''simple docstring'''
    def __init__( self , force_token_map ):
        """simple docstring"""
        force_token_map = dict(force_token_map )
        # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the
        # index of the array corresponds to the index of the token to be forced, for XLA compatibility.
        # Indexes without forced tokens will have a negative value.
        force_token_array = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.int32 ) * -1
        for index, token in force_token_map.items():
            if token is not None:
                force_token_array = force_token_array.at[index].set(token )
        self.force_token_array = jnp.int32(force_token_array )
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        def _force_token(generation_idx ):
            batch_size = scores.shape[0]
            current_token = self.force_token_array[generation_idx]
            new_scores = jnp.ones_like(scores , dtype=scores.dtype ) * -float('inf' )
            updates = jnp.zeros((batch_size, 1) , dtype=scores.dtype )
            new_scores = lax.dynamic_update_slice(new_scores , updates , (0, current_token) )
            return new_scores
        scores = lax.cond(
            cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond(
                self.force_token_array[cur_len] >= 0 , lambda: _force_token(cur_len ) , lambda: scores , ) , )
        return scores
class FlaxWhisperTimeStampLogitsProcessor(FlaxLogitsProcessor ):
    '''simple docstring'''
    def __init__( self , generate_config , model_config , decoder_input_length ):
        """simple docstring"""
        self.eos_token_id = generate_config.eos_token_id
        self.no_timestamps_token_id = generate_config.no_timestamps_token_id
        self.timestamp_begin = generate_config.no_timestamps_token_id + 1
        self.begin_index = decoder_input_length + 1
        if generate_config.is_multilingual:
            # room for language token and task token
            self.begin_index += 2
        if hasattr(generate_config , 'max_initial_timestamp_index' ):
            self.max_initial_timestamp_index = generate_config.max_initial_timestamp_index
        else:
            self.max_initial_timestamp_index = model_config.vocab_size
        if self.max_initial_timestamp_index is None:
            self.max_initial_timestamp_index = model_config.vocab_size
    def __call__( self , input_ids , scores , cur_len ):
        """simple docstring"""
        scores = scores.at[:, self.no_timestamps_token_id].set(-float('inf' ) )
        def handle_pairs(input_ids_k , scores_k ):
            last_was_timestamp = jnp.where((cur_len - self.begin_index) >= 1 , True , False )
            last_was_timestamp = jnp.where(
                input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , False , )
            penultimate_was_timestamp = jnp.where((cur_len - self.begin_index) < 2 , True , False )
            penultimate_was_timestamp = jnp.where(
                input_ids_k[cur_len - 2] >= self.timestamp_begin , True and penultimate_was_timestamp , penultimate_was_timestamp , )
            return jnp.where(
                last_was_timestamp , jnp.where(
                    penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float('inf' ) ) , scores_k.at[: self.eos_token_id].set(-float('inf' ) ) , ) , scores_k , )
        scores = jax.vmap(handle_pairs )(input_ids , scores )
        apply_max_initial_timestamp = jnp.where(cur_len == self.begin_index , True , False )
        apply_max_initial_timestamp = jnp.where(
            self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , apply_max_initial_timestamp , )
        last_allowed = self.timestamp_begin + self.max_initial_timestamp_index
        scores = jnp.where(
            apply_max_initial_timestamp , scores.at[:, last_allowed + 1 :].set(-float('inf' ) ) , scores , )
        # if sum of probability over timestamps is above any other token, sample timestamp
        logprobs = jax.nn.log_softmax(scores , axis=-1 )
        def handle_cumulative_probs(logprobs_k , scores_k ):
            timestamp_logprob = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 )
            max_text_token_logprob = jnp.max(logprobs_k[: self.timestamp_begin] )
            return jnp.where(
                timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float('inf' ) ) , scores_k , )
        scores = jax.vmap(handle_cumulative_probs )(logprobs , scores )
        return scores
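# Illustrative composition (a sketch, not part of the original file): the
# warpers are chained through FlaxLogitsProcessorList and applied to a batch
# of next-token logits.
#
#     import jax.numpy as jnp
#     processors = FlaxLogitsProcessorList(
#         [FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(top_k=50)]
#     )
#     input_ids = jnp.zeros((2, 5), dtype=jnp.int32)
#     scores = jnp.zeros((2, 32000))  # (batch_size, vocab_size) dummy logits
#     scores = processors(input_ids, scores, cur_len=5)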
| 286
| 1
|
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "emoji_file": "emoji.json"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt",
    },
    "emoji_file": {
        "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json",
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "abeja/gpt-neox-japanese-2.7b": 2048,
}
def load_vocab_and_emoji( vocab_file , emoji_file ):
    '''simple docstring'''
    with open(emoji_file , "r" , encoding="utf-8" ) as f:
        emoji = json.loads(f.read() )
    vocab = collections.OrderedDict()
    raw_vocab = collections.OrderedDict()
    ids_to_tokens = collections.OrderedDict()
    with open(vocab_file , "r" , encoding="utf-8" ) as f:
        token = f.readlines()
    token = [[t.rstrip("\n" )] if (t == "," or "," not in t) else t.rstrip("\n" ).split("," ) for t in token]
    for idx, b in enumerate(token ):
        ids_to_tokens[idx] = b
        raw_vocab[",".join(b )] = idx
        for wd in b:
            vocab[wd] = idx
    return vocab, raw_vocab, ids_to_tokens, emoji
class GPTNeoXJapaneseTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__( self, vocab_file, emoji_file, unk_token="<|endoftext|>", pad_token="<|endoftext|>", bos_token="<|startoftext|>", eos_token="<|endoftext|>", do_clean_text=False, **kwargs, ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token, pad_token=pad_token, bos_token=bos_token, eos_token=eos_token, do_clean_text=do_clean_text, **kwargs, )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        if not os.path.isfile(emoji_file):
            raise ValueError(
                f"Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                " pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`")
        self.do_clean_text = do_clean_text
        self.vocab, self.raw_vocab, self.ids_to_tokens, self.emoji = load_vocab_and_emoji(vocab_file, emoji_file)
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab, ids_to_tokens=self.ids_to_tokens, emoji=self.emoji)
    @property
    def vocab_size( self):
        '''simple docstring'''
        return len(self.raw_vocab)
    def get_vocab( self):
        '''simple docstring'''
        return dict(self.raw_vocab, **self.added_tokens_encoder)
    def _tokenize( self, text):
        '''simple docstring'''
        return self.subword_tokenizer.tokenize(text, clean=self.do_clean_text)
    def _convert_token_to_id( self, token):
        '''simple docstring'''
        return self.vocab.get(token, self.vocab.get(self.unk_token))
    def _convert_id_to_token( self, index):
        '''simple docstring'''
        return self.subword_tokenizer.convert_id_to_token(index)
    def convert_tokens_to_string( self, tokens):
        '''simple docstring'''
        out_string = "".join(tokens).strip()
        return out_string
    def _build_conversation_input_ids( self, conversation):
        '''simple docstring'''
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self, save_directory, filename_prefix = None):
        '''simple docstring'''
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
            emoji_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["emoji_file"])
        else:
            vocab_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["vocab_file"]
            )
            emoji_file = (
                (filename_prefix + "-" if filename_prefix else "") + save_directory + VOCAB_FILES_NAMES["emoji_file"]
            )
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!")
                    index = token_index
                writer.write(",".join(token) + "\n")
                index += 1
        with open(emoji_file, "w", encoding="utf-8") as writer:
            json.dump(self.emoji, writer)
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer(object):
    def __init__( self, vocab, ids_to_tokens, emoji):
        '''simple docstring'''
        self.vocab = vocab  # same as swe
        self.ids_to_tokens = ids_to_tokens  # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w) for w in self.vocab.keys()])
        self.content_repatter1 = re.compile(R"(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)")
        self.content_repatter2 = re.compile(R"[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*")
        self.content_repatter3 = re.compile(R"[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}")
        self.content_repatter4 = re.compile(
            R"([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter5 = re.compile(
            R"(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*")
        self.content_repatter6 = re.compile(
            R"((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*")
        keisen = "─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"
        blocks = "▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"
        self.content_trans1 = str.maketrans({k: "<BLOCK>" for k in keisen + blocks})
def __len__( self):
'''simple docstring'''
return len(self.ids_to_tokens)
    def clean_text( self, content):
        '''simple docstring'''
        content = self.content_repatter1.sub("<URL>", content)
        content = self.content_repatter2.sub("<EMAIL>", content)
        content = self.content_repatter3.sub("<TEL>", content)
        content = self.content_repatter4.sub("<DATE>", content)
        content = self.content_repatter5.sub("<DATE>", content)
        content = self.content_repatter6.sub("<PRICE>", content)
        content = content.translate(self.content_trans1)
        while "<BLOCK><BLOCK>" in content:
            content = content.replace("<BLOCK><BLOCK>", "<BLOCK>")
        return content
    def tokenize( self, text, clean=False):
        '''simple docstring'''
        text = text.replace(" ", "<SP>")
        text = text.replace("　", "<SP>")
        text = text.replace("\r\n", "<BR>")
        text = text.replace("\n", "<BR>")
        text = text.replace("\r", "<BR>")
        text = text.replace("\t", "<TAB>")
        text = text.replace("—", "ー")
        text = text.replace("−", "ー")
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k, v)
        if clean:
            text = self.clean_text(text)
        def check_simbol(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 2:
                c = (int(e[0]) << 8) + int(e[1])
                if (
                    (c >= 0XC2_A1 and c <= 0XC2_BF)
                    or (c >= 0XC7_80 and c <= 0XC7_83)
                    or (c >= 0XCA_B9 and c <= 0XCB_BF)
                    or (c >= 0XCC_80 and c <= 0XCD_A2)
                ):
                    return True
            return False
        def checkuae(x):
            e = x.encode()
            if len(x) == 1 and len(e) == 3:
                c = (int(e[0]) << 16) + (int(e[1]) << 8) + int(e[2])
                if c >= 0XE2_80_80 and c <= 0XE2_B0_7F:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text):
            end = min(len(text), pos + self.maxlen + 1) if text[pos] == "<" else pos + 3
            candidates = []  # (token_id, token, pos)
            for e in range(end, pos, -1):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e))
            if len(candidates) > 0:
                # the smallest token_id is adopted
                _, wd, e = sorted(candidates, key=lambda x: x[0])[0]
                result.append(wd)
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd):
                    result.append("<KIGOU>")
                elif checkuae(wd):
                    result.append("<U2000U2BFF>")
                else:
                    for i in wd.encode("utf-8"):
                        result.append("<|byte%d|>" % i)
                pos = end
        return result
    def convert_id_to_token( self, index, breakline="\n"):
        '''simple docstring'''
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2]))
        else:
            if len(byte_tokens) > 0:
                words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji["emoji_inv"][word])
            elif word == "<SP>":
                words.append(" ")
            elif word == "<BR>":
                words.append(breakline)
            elif word == "<TAB>":
                words.append("\t")
            elif word == "<BLOCK>":
                words.append("▀")
            elif word == "<KIGOU>":
                words.append("ǀ")
            elif word == "<U2000U2BFF>":
                words.append("‖")
            else:
                words.append(word)
        if len(byte_tokens) > 0:
            words.append(bytearray(byte_tokens).decode("utf-8", errors="replace"))
        text = "".join(words)
        return text
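# Illustrative round trip (a sketch, not part of the original file; assumes the
# "abeja/gpt-neox-japanese-2.7b" vocab and emoji files can be downloaded):
#
#     tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained("abeja/gpt-neox-japanese-2.7b")
#     ids = tokenizer("こんにちは、世界。")["input_ids"]
#     print(tokenizer.decode(ids))
#
# clean_text/tokenize first normalize URLs, emails, phone numbers, dates and
# prices to placeholders such as <URL> and <PRICE>, then fall back to byte
# tokens (<|byteN|>) for anything missing from the vocabulary.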
| 300
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_5457_1817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force( force , area , distance ):
'''simple docstring'''
if (force, area, distance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if force < 0:
raise ValueError("Magnitude of force can not be negative" )
if distance < 0:
raise ValueError("Distance can not be negative" )
if area < 0:
raise ValueError("Area can not be negative" )
if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
return {"force": force}
elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
return {"area": area}
elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
return {"distance": distance}
raise ValueError("One and only one argument must be 0" )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
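# Worked example (a sketch, not part of the original file): two plates of area
# 4e-4 m^2 separated by 1e-6 m, using F = (h-bar * c * pi**2 * A) / (240 * d**4):
#
#     casimir_force(force=0, area=4e-4, distance=1e-6)
#     # -> {'force': ...} with magnitude
#     # (1.054571817e-34 * 3e8 * pi**2 * 4e-4) / (240 * (1e-6) ** 4) ≈ 5.2e-7 N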
| 300
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class FocalNetConfig( BackboneConfigMixin , PretrainedConfig ):
    """simple docstring"""
    model_type = 'focalnet'
    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , use_conv_embed=False , hidden_sizes=[192, 384, 768, 768] , depths=[2, 2, 6, 2] , focal_levels=[2, 2, 2, 2] , focal_windows=[3, 3, 3, 3] , hidden_act="gelu" , mlp_ratio=4.0 , hidden_dropout_prob=0.0 , drop_path_rate=0.1 , use_layerscale=False , layerscale_value=1e-4 , use_post_layernorm=False , use_post_layernorm_in_modulation=False , normalize_modulator=False , initializer_range=0.02 , layer_norm_eps=1e-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(self.depths ) + 1 )]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features , out_indices=out_indices , stage_names=self.stage_names )
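# Illustrative sketch (not part of the original file): instantiating the config
# and reading the backbone metadata derived in __init__ above.
#
#     config = FocalNetConfig(out_features=["stage2", "stage4"])
#     assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]
#     assert config.out_features == ["stage2", "stage4"]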
| 303
|
"""simple docstring"""
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        mod_file = inspect.getfile(accelerate.test_utils )
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_script.py'''] )
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_distributed_data_loop.py'''] )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_ops.py'''] )
    @require_multi_gpu
    def test_multi_gpu( self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_multi_gpu_ops( self ):
        print(f'''Found {torch.cuda.device_count()} devices.''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
        print(f'''Command: {cmd}''' )
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_pad_across_processes( self ):
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
        with patch_environment(omp_num_threads=1 ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
    @require_multi_gpu
    def test_distributed_data_generation( self ):
        print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
        cmd = ['''torchrun''', f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
        with patch_environment(omp_num_threads=1 , cuda_visible_devices='''0,1''' ):
            execute_subprocess_async(cmd , env=os.environ.copy() )
if __name__ == "__main__":
    accelerator = Accelerator()
    shape = (accelerator.state.process_index + 2, 1_0)
    tensor = torch.randint(0, 1_0, shape).to(accelerator.device)
    error_msg = ""
    tensor1 = accelerator.pad_across_processes(tensor)
    if tensor1.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor1.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    if not torch.equal(tensor1[: accelerator.state.process_index + 2], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor1[accelerator.state.process_index + 2 :] == 0):
        error_msg += "Padding was not done with the right value (0)."
    tensor2 = accelerator.pad_across_processes(tensor, pad_first=True)
    if tensor2.shape[0] != accelerator.state.num_processes + 1:
        error_msg += F"Found shape {tensor2.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
    index = accelerator.state.num_processes - accelerator.state.process_index - 1
    if not torch.equal(tensor2[index:], tensor):
        error_msg += "Tensors have different values."
    if not torch.all(tensor2[:index] == 0):
        error_msg += "Padding was not done with the right value (0)."
    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 45
| 0
|
def print_max_activities( start, finish ):
    n = len(finish )
    print('''The following activities are selected:''' )
    # The first activity is always selected
    i = 0
    print(i, end=''',''' )
    # Consider rest of the activities
    for j in range(n ):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=''',''' )
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
print_max_activities(start, finish)
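# Worked example (a sketch, not part of the original file): with the arrays
# above (already sorted by finish time) the scan prints "0,1,3,4,":
# activity 0 is taken first; 1 starts at 3 >= finish[0]=2; 2 starts at 0 < 4 and
# is skipped; 3 starts at 5 >= 4; 4 starts at 8 >= 7; 5 starts at 5 < 9.
# Note the greedy argument assumes the input is sorted by finish time.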
| 366
|
import os
import unittest
from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class TransfoXLTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = TransfoXLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False
    def setUp( self ) -> Any:
        super().setUp()
        vocab_tokens = [
'''<unk>''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''unwanted''',
'''wa''',
'''un''',
'''running''',
''',''',
'''low''',
'''l''',
]
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
    def get_tokenizer( self , **kwargs ) -> Optional[int]:
        kwargs['''lower_case'''] = True
        return TransfoXLTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ) -> Optional[int]:
        input_text = '''<unk> UNwanted , running'''
        output_text = '''<unk> unwanted, running'''
        return input_text, output_text
    def test_full_tokenizer( self ) -> List[str]:
        tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file , lower_case=True )
        tokens = tokenizer.tokenize('''<unk> UNwanted , running''' )
        self.assertListEqual(tokens , ['''<unk>''', '''unwanted''', ''',''', '''running'''] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [0, 4, 8, 7] )
    def test_full_tokenizer_lower( self ) -> List[str]:
        tokenizer = TransfoXLTokenizer(lower_case=True )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
    def test_full_tokenizer_no_lower( self ) -> List[str]:
        tokenizer = TransfoXLTokenizer(lower_case=False )
        self.assertListEqual(
            tokenizer.tokenize(''' \tHeLLo ! how \n Are yoU ? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
    def test_full_tokenizer_moses_numbers( self ) -> List[Any]:
        tokenizer = TransfoXLTokenizer(lower_case=False )
        text_in = '''Hello (bracket) and side-scrolled [and] Henry\'s $5,000 with 3.34 m. What\'s up!?'''
        tokens_out = [
'''Hello''',
'''(''',
'''bracket''',
''')''',
'''and''',
'''side''',
'''@-@''',
'''scrolled''',
'''[''',
'''and''',
''']''',
'''Henry''',
'''\'s''',
'''$''',
'''5''',
'''@,@''',
'''000''',
'''with''',
'''3''',
'''@.@''',
'''34''',
'''m''',
'''.''',
'''What''',
'''\'s''',
'''up''',
'''!''',
'''?''',
]
        self.assertListEqual(tokenizer.tokenize(text_in ) , tokens_out )
        self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out ) , text_in )
    def test_move_added_token( self ) -> Dict:
        tokenizer = self.get_tokenizer()
        original_len = len(tokenizer )
        tokenizer.add_tokens(['''new1''', '''new2'''] )
        tokenizer.move_added_token('''new1''' , 1 )
        # Check that moved token is not copied (duplicate)
        self.assertEqual(len(tokenizer ) , original_len + 2 )
        # Check that token is moved to specified id
        self.assertEqual(tokenizer.encode('''new1''' ) , [1] )
        self.assertEqual(tokenizer.decode([1] ) , '''new1''' )
| 257
| 0
|
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=4_00 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_55 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 13_33}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size['''shortest_edge''']
            elif w > h:
                expected_height = self.size['''shortest_edge''']
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size['''shortest_edge''']
                expected_width = self.size['''shortest_edge''']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = YolosImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = YolosImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , False )
def UpperCamelCase__ ( self : List[Any] ):
pass
def UpperCamelCase__ ( self : int ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , Image.Image )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
_a = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self : Optional[Any] ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , np.ndarray )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
_a = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase__ ( self : int ):
# Initialize image_processing
_a = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a = prepare_image_inputs(self.image_processor_tester , equal_resolution=_UpperCAmelCase , torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase , torch.Tensor )
# Test not batched input
_a = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
_a = self.image_processor_tester.get_expected_values(_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_a = image_processing(_UpperCAmelCase , return_tensors="pt" ).pixel_values
_a = self.image_processor_tester.get_expected_values(_UpperCAmelCase , batched=_UpperCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_equivalence_padding(self):
        # Initialize image_processings
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")
        self.assertTrue(
            torch.allclose(encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4)
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")
        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 63
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 177
| 0
|
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide `number_of_bytes` into `partitions` contiguous 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
if __name__ == "__main__":
import doctest
doctest.testmod()
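    # Hedged usage sketch (illustrative values, not part of the original module):
    # allocation_num(16647, 4) -> ['1-4161', '4162-8322', '8323-12483', '12484-16647']
    print(allocation_num(16647, 4))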
| 45
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 45
| 1
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """Newton-Laplace formula: c = sqrt(K / rho)."""
    if density <= 0:
        raise ValueError("Impossible fluid density")
    if bulk_modulus <= 0:
        raise ValueError("Impossible bulk modulus")
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
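    # Hedged example (illustrative values for water at ~20 C, not from the original module):
    # density ~ 998 kg/m^3 and bulk modulus ~ 2.15e9 Pa give roughly 1467.7 m/s.
    print(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.15e9))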
| 258
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, CycleDiffusionPipeline, DDIMScheduler, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class CycleDiffusionPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = CycleDiffusionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
        "negative_prompt",
        "height",
        "width",
        "negative_prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"source_prompt"})
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000,
            clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "An astronaut riding an elephant",
            "source_prompt": "An astronaut riding a horse",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "eta": 0.1,
            "strength": 0.8,
            "guidance_scale": 3,
            "source_guidance_scale": 1,
            "output_type": "numpy",
        }
        return inputs
    def test_cycle_diffusion(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4459, 0.4943, 0.4544, 0.6643, 0.5474, 0.4327, 0.5701, 0.5959, 0.5179])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_cycle_diffusion_fp16(self):
        components = self.get_dummy_components()
        for name, module in components.items():
            if hasattr(module, "half"):
                components[name] = module.half()
        pipe = CycleDiffusionPipeline(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)
        images = output.images
        image_slice = images[0, -3:, -3:, -1]
        assert images.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.3506, 0.4543, 0.446, 0.4575, 0.5195, 0.4155, 0.5273, 0.518, 0.4116])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    @skip_mps
    def test_save_load_local(self):
        return super().test_save_load_local()

    @unittest.skip("non-deterministic pipeline")
    def test_inference_batch_single_identical(self):
        return super().test_inference_batch_single_identical()

    @skip_mps
    def test_dict_tuple_outputs_equivalent(self):
        return super().test_dict_tuple_outputs_equivalent()

    @skip_mps
    def test_save_load_optional_components(self):
        return super().test_save_load_optional_components()

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        return super().test_attention_slicing_forward_pass()
@slow
@require_torch_gpu
class CycleDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_cycle_diffusion_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car_fp16.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(
            model_id, scheduler=scheduler, safety_checker=None, torch_dtype=torch.float16, revision="fp16"
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator,
            output_type="np",
        )
        image = output.images
        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(image - expected_image).max() < 5e-1

    def test_cycle_diffusion_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/cycle-diffusion/black_colored_car.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/cycle-diffusion/blue_colored_car.npy"
        )
        init_image = init_image.resize((512, 512))
        model_id = "CompVis/stable-diffusion-v1-4"
        scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = CycleDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        source_prompt = "A black colored car"
        prompt = "A blue colored car"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, source_prompt=source_prompt, image=init_image, num_inference_steps=100,
            eta=0.1, strength=0.85, guidance_scale=3, source_guidance_scale=1, generator=generator,
            output_type="np",
        )
        image = output.images
        assert np.abs(image - expected_image).max() < 2e-2
| 258
| 1
|
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any) -> None:
        self.data = data
        self.next = None


class LinkedList:
    def __init__(self) -> None:
        self.head = None

    def print_list(self) -> None:
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    # adding nodes at the head of the list
    def push(self, new_data: Any) -> None:
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # swapping the data of two nodes, found by value
    def swap_nodes(self, node_data_1: Any, node_data_2: Any) -> None:
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data
if __name__ == "__main__":
    ll = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
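    # Expected output of the run above (a sketch, worked out from the push/swap semantics):
    #   1 2 3 4 5
    #   After swapping
    #   4 2 3 1 5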
| 363
|
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def get_new_path(suffix="") -> str:
    directory = tempfile.mkdtemp()
    return os.path.join(directory, str(uuid.uuid4()) + suffix)
@require_soundfile
@require_torch
class AgentAudioTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        agent_type = AgentAudio(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        del agent_type
        # Ensure the path remains even after the object deletion
        self.assertTrue(os.path.exists(path))
        # Ensure that the file contains the same value as the original tensor
        new_tensor, sample_rate = sf.read(path)
        self.assertTrue(torch.allclose(tensor, torch.tensor(new_tensor), atol=1e-4))

    def test_from_string(self):
        tensor = torch.rand(12, dtype=torch.float64) - 0.5
        path = get_new_path(suffix=".wav")
        sf.write(path, tensor, 16000)
        agent_type = AgentAudio(path)
        self.assertTrue(torch.allclose(tensor, agent_type.to_raw(), atol=1e-4))
        self.assertEqual(agent_type.to_string(), path)
@require_vision
@require_torch
class AgentImageTests(unittest.TestCase):
    def test_from_tensor(self):
        tensor = torch.randint(0, 256, (64, 64, 3))
        agent_type = AgentImage(tensor)
        path = str(agent_type.to_string())
        # Ensure that the tensor and the agent_type's tensor are the same
        self.assertTrue(torch.allclose(tensor, agent_type._tensor, atol=1e-4))
        self.assertIsInstance(agent_type.to_raw(), Image.Image)
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_string(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(path)
        self.assertTrue(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))

    def test_from_image(self):
        path = Path(get_tests_dir("fixtures/tests_samples/COCO")) / "000000039769.png"
        image = Image.open(path)
        agent_type = AgentImage(image)
        self.assertFalse(path.samefile(agent_type.to_string()))
        self.assertTrue(image == agent_type.to_raw())
        # Ensure the path remains even after the object deletion
        del agent_type
        self.assertTrue(os.path.exists(path))
class AgentTextTests(unittest.TestCase):
    def test_from_string(self):
        string = "Hey!"
        agent_type = AgentText(string)
        self.assertEqual(string, agent_type.to_string())
        self.assertEqual(string, agent_type.to_raw())
        self.assertEqual(string, agent_type)
| 91
| 0
|
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)
class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 300
|
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class SdeVeOutput(BaseOutput):
    """Output class for the ScoreSdeVeScheduler's step function."""

    prev_sample: torch.FloatTensor
    prev_sample_mean: torch.FloatTensor


class ScoreSdeVeScheduler(SchedulerMixin, ConfigMixin):
    """Variance-exploding stochastic differential equation (SDE) scheduler."""

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 2000,
        snr: float = 0.15,
        sigma_min: float = 0.01,
        sigma_max: float = 1348.0,
        sampling_eps: float = 1e-5,
        correct_steps: int = 1,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max
        # setable values
        self.timesteps = None
        self.set_sigmas(num_train_timesteps, sigma_min, sigma_max, sampling_eps)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, sampling_eps: float = None, device: Union[str, torch.device] = None):
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        self.timesteps = torch.linspace(1, sampling_eps, num_inference_steps, device=device)

    def set_sigmas(self, num_inference_steps: int, sigma_min: float = None, sigma_max: float = None, sampling_eps: float = None):
        sigma_min = sigma_min if sigma_min is not None else self.config.sigma_min
        sigma_max = sigma_max if sigma_max is not None else self.config.sigma_max
        sampling_eps = sampling_eps if sampling_eps is not None else self.config.sampling_eps
        if self.timesteps is None:
            self.set_timesteps(num_inference_steps, sampling_eps)
        self.sigmas = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
        self.discrete_sigmas = torch.exp(torch.linspace(math.log(sigma_min), math.log(sigma_max), num_inference_steps))
        self.sigmas = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps])

    def get_adjacent_sigma(self, timesteps, t):
        return torch.where(
            timesteps == 0,
            torch.zeros_like(t.to(timesteps.device)),
            self.discrete_sigmas[timesteps - 1].to(timesteps.device),
        )

    def step_pred(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SdeVeOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        timestep = timestep * torch.ones(
            sample.shape[0], device=sample.device)  # torch.repeat_interleave(timestep, sample.shape[0])
        timesteps = (timestep * (len(self.timesteps) - 1)).long()
        # mps requires indices to be in the same device, so we use cpu as is the default with cuda
        timesteps = timesteps.to(self.discrete_sigmas.device)
        sigma = self.discrete_sigmas[timesteps].to(sample.device)
        adjacent_sigma = self.get_adjacent_sigma(timesteps, timestep).to(sample.device)
        drift = torch.zeros_like(sample)
        diffusion = (sigma**2 - adjacent_sigma**2) ** 0.5
        # equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
        # also equation 47 shows the analog from SDE models to ancestral sampling methods
        diffusion = diffusion.flatten()
        while len(diffusion.shape) < len(sample.shape):
            diffusion = diffusion.unsqueeze(-1)
        drift = drift - diffusion**2 * model_output
        # equation 6: sample noise for the diffusion term of
        noise = randn_tensor(
            sample.shape, layout=sample.layout, generator=generator, device=sample.device, dtype=sample.dtype)
        prev_sample_mean = sample - drift  # subtract because `dt` is a small negative timestep
        # TODO is the variable diffusion the correct scaling term for the noise?
        prev_sample = prev_sample_mean + diffusion * noise  # add impact of diffusion field g
        if not return_dict:
            return (prev_sample, prev_sample_mean)
        return SdeVeOutput(prev_sample=prev_sample, prev_sample_mean=prev_sample_mean)

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sample: torch.FloatTensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler")
        # For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
        # sample noise for correction
        noise = randn_tensor(sample.shape, layout=sample.layout, generator=generator).to(sample.device)
        # compute step size from the model_output, the noise, and the snr
        grad_norm = torch.norm(model_output.reshape(model_output.shape[0], -1), dim=-1).mean()
        noise_norm = torch.norm(noise.reshape(noise.shape[0], -1), dim=-1).mean()
        step_size = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
        step_size = step_size * torch.ones(sample.shape[0]).to(sample.device)
        # self.repeat_scalar(step_size, sample.shape[0])
        # compute corrected sample: model_output term and noise term
        step_size = step_size.flatten()
        while len(step_size.shape) < len(sample.shape):
            step_size = step_size.unsqueeze(-1)
        prev_sample_mean = sample + step_size * model_output
        prev_sample = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
        if not return_dict:
            return (prev_sample,)
        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        timesteps = timesteps.to(original_samples.device)
        sigmas = self.discrete_sigmas.to(original_samples.device)[timesteps]
        noise = (
            noise * sigmas[:, None, None, None]
            if noise is not None
            else torch.randn_like(original_samples) * sigmas[:, None, None, None]
        )
        noisy_samples = noise + original_samples
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
| 300
| 1
|
"""simple docstring"""
from __future__ import annotations
def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] and array[index2]; swap them if out of order for the direction."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)
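# Note: bitonic sort only handles sequences whose length is a power of two.
# Minimal sketch of direct use (illustrative, not part of the original module):
#   data = [12, 42, -21, 17]
#   bitonic_sort(data, 0, len(data), 1)  # ascending -> data == [-21, 12, 17, 42]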
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
| 155
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError("number of qubits must be a integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")
    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")
    quantum_circuit = QuantumCircuit(qr, cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10_000)
    return job.result().get_counts(quantum_circuit)
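# For n qubits applied to |0...0>, the QFT output is measured in a near-uniform
# distribution over all 2**n basis states; e.g. with 3 qubits and 10_000 shots,
# each of the 8 outcomes should appear roughly 1250 times (a statistical
# expectation, not an exact guarantee).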
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
| 155
| 1
|
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ : List[str] = logging.get_logger(__name__)
lowerCAmelCase__ : Dict = {
'kakaobrain/align-base': 'https://huggingface.co/kakaobrain/align-base/resolve/main/config.json',
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=640,
        temperature_init_value=1.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
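# Minimal usage sketch (uses only the classes defined above; illustrative):
#   config = AlignConfig()  # builds default AlignTextConfig / AlignVisionConfig
#   config = AlignConfig.from_text_vision_configs(AlignTextConfig(), AlignVisionConfig())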
| 98
|
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
lowerCAmelCase__ : List[Any] =logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray, output_size: Union[int, Iterable[int]], keep_aspect_ratio: bool, multiple: int
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
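# Hedged example (illustrative numbers): a 480x640 input resized toward (384, 384)
# with keep_aspect_ratio=True and multiple=32 fits the height (scale 0.8) and
# returns (384, 512) -- both multiples of 32.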
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 384, "width": 384}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.keep_aspect_ratio = keep_aspect_ratio
        self.ensure_multiple_of = ensure_multiple_of
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        keep_aspect_ratio: bool = False,
        ensure_multiple_of: int = 1,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = get_resize_output_image_size(
            image, output_size=(size["height"], size["width"]), keep_aspect_ratio=keep_aspect_ratio, multiple=ensure_multiple_of,
        )
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image, scale, data_format=None, **kwargs) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image, mean, std, data_format=None, **kwargs) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: int = None,
        keep_aspect_ratio: bool = None,
        ensure_multiple_of: int = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_semantic_segmentation(self, outputs, target_sizes=None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits")
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False)
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
| 257
| 0
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 351
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
    "configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
    "tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlm"] = [
'XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMForMultipleChoice',
'XLMForQuestionAnswering',
'XLMForQuestionAnsweringSimple',
'XLMForSequenceClassification',
'XLMForTokenClassification',
'XLMModel',
'XLMPreTrainedModel',
'XLMWithLMHeadModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlm"] = [
'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMForMultipleChoice',
'TFXLMForQuestionAnsweringSimple',
'TFXLMForSequenceClassification',
'TFXLMForTokenClassification',
'TFXLMMainLayer',
'TFXLMModel',
'TFXLMPreTrainedModel',
'TFXLMWithLMHeadModel',
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 72
| 0
|
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel's encoding method."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
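# Minimal usage sketch (illustrative shapes, not part of the original module):
#   model = VQModel()                      # defaults registered via @register_to_config
#   sample = torch.randn(1, 3, 32, 32)
#   reconstruction = model(sample).sample  # encode -> quantize -> decode round trip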
| 45
|
"""simple docstring"""
def twos_complement(number: int) -> str:
    """Return the two's complement of a negative integer as a binary string."""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
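    # Hedged example (worked by hand, not part of the original module):
    # twos_complement(-5) -> "0b1011"  (3-bit magnitude of 5, plus the sign bit)
    print(twos_complement(-5))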
| 45
| 1
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : Optional[int] = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect."
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.")
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
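# Minimal usage sketch (illustrative): with the default conv_stride above, the
# feature extractor's overall downsampling factor is the product of the strides:
#   SEWConfig().inputs_to_logits_ratio  # -> 5 * 2**6 = 320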
| 369
|
import os
from argparse import ArgumentParser
from typing import List
import torch.utils.data
from datasets import Dataset, IterableDataset
from datasets.distributed import split_dataset_by_node
NUM_SHARDS = 4
NUM_ITEMS_PER_SHARD = 3


class FailedTestError(RuntimeError):
    pass


def gen(shards: List[str]):
    for shard in shards:
        for i in range(NUM_ITEMS_PER_SHARD):
            yield {"i": i, "shard": shard}


def main():
    rank = int(os.environ["RANK"])
    world_size = int(os.environ["WORLD_SIZE"])
    parser = ArgumentParser()
    parser.add_argument("--streaming", type=bool)
    parser.add_argument("--local_rank", type=int)
    parser.add_argument("--num_workers", type=int, default=0)
    args = parser.parse_args()
    streaming = args.streaming
    num_workers = args.num_workers
    gen_kwargs = {"shards": [f"shard_{shard_idx}" for shard_idx in range(NUM_SHARDS)]}
    ds = IterableDataset.from_generator(gen, gen_kwargs=gen_kwargs)
    if not streaming:
        ds = Dataset.from_list(list(ds))
    ds = split_dataset_by_node(ds, rank=rank, world_size=world_size)
    dataloader = torch.utils.data.DataLoader(ds, num_workers=num_workers)
    full_size = NUM_SHARDS * NUM_ITEMS_PER_SHARD
    expected_local_size = full_size // world_size
    expected_local_size += int(rank < (full_size % world_size))
    local_size = sum(1 for _ in dataloader)
    if local_size != expected_local_size:
        raise FailedTestError(f"local_size {local_size} != expected_local_size {expected_local_size}")
if __name__ == "__main__":
main()
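    # Launch sketch (hypothetical script name and flags, shown for illustration only):
    #   torchrun --nproc_per_node=2 distributed_split_test.py --streaming True --num_workers 2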
| 204
| 0
|
"""simple docstring"""
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
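
# Usage sketch (assumption): PipelineTool chains encode -> forward -> decode, so the tool can
# be called directly on a waveform:
#
#   tool = SpeechToTextTool()
#   text = tool(audio)  # `audio`: a 1-D float array at the sampling rate the processor expects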
| 61
|
"""simple docstring"""
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)


def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
    except UnpicklingError as e:
        try:
            with open(model_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please"
                        " install git-lfs and run `git lfs install` followed by `git lfs pull` in the"
                        " folder you cloned."
                    )
                else:
                    raise ValueError from e
        except (UnicodeDecodeError, ValueError):
            raise EnvironmentError(f"Unable to convert {model_file} to Flax deserializable object. ")

    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            "Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see"
            " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"
            " instructions."
        )
        raise

    # check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            "Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` "
            "before loading those in PyTorch model."
        )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]

        if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    f"Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected "
                    f"to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}."
                )
            else:
                # add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
        logger.warning(
            "Some weights of the Flax model were not used when initializing the PyTorch model"
            f" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"
            f" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"
            " (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"
            f" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"
            " to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"
            " FlaxBertForSequenceClassification model)."
        )
    if len(missing_keys) > 0:
        logger.warning(
            f"Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"
            f" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"
            " use it for predictions and inference."
        )

    return pt_model
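
# Usage sketch (assumption): given a PyTorch model instance and a Flax msgpack checkpoint on
# disk,
#
#   pt_model = MyPyTorchModel(config)  # hypothetical model/config, for illustration only
#   pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "flax_model.msgpack")
#
# conv kernels are transposed to PyTorch's (out, in, kh, kw) layout and any bfloat16 leaves
# are upcast to float32 first, since torch.from_numpy cannot handle bf16.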
| 91
| 0
|
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
UpperCAmelCase__ = "__DUMMY_TRANSFORMERS_USER__"
UpperCAmelCase__ = "Dummy User"
UpperCAmelCase__ = "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"
UpperCAmelCase__ = "https://hub-ci.huggingface.co"
UpperCAmelCase__ = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
UpperCAmelCase__ = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
UpperCAmelCase__ = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def A ( _UpperCAmelCase : Any ) -> List[str]:
'''simple docstring'''
monkeypatch.setattr(
'huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE' , _UpperCAmelCase )
@pytest.fixture
def A ( _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
monkeypatch.setattr('datasets.config.HF_ENDPOINT' , _UpperCAmelCase )
monkeypatch.setattr('datasets.config.HUB_DATASETS_URL' , _UpperCAmelCase )
@pytest.fixture
def A ( _UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
monkeypatch.setattr('huggingface_hub.hf_api.HfFolder.path_token' , _UpperCAmelCase )
@pytest.fixture
def A ( _UpperCAmelCase : int , _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
HfFolder.save_token(_UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope='session' )
def A ( ) -> Union[str, Any]:
'''simple docstring'''
return HfApi(endpoint=_UpperCAmelCase )
@pytest.fixture(scope='session' )
def A ( _UpperCAmelCase : HfApi ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = HfFolder.get_token()
HfFolder.save_token(_UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(_UpperCAmelCase )
@pytest.fixture
def A ( _UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
def _cleanup_repo(_UpperCAmelCase : int ):
hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='dataset' )
return _cleanup_repo
@pytest.fixture
def A ( _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
@contextmanager
def _temporary_repo(_UpperCAmelCase : List[str] ):
try:
yield repo_id
finally:
cleanup_repo(_UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope='session' )
def A ( _UpperCAmelCase : HfApi , _UpperCAmelCase : str , _UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = F"repo_txt_data-{int(time.time() * 10E3 )}"
_UpperCAmelCase = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='dataset' , private=_UpperCAmelCase )
hf_api.upload_file(
token=_UpperCAmelCase , path_or_fileobj=str(_UpperCAmelCase ) , path_in_repo='data/text_data.txt' , repo_id=_UpperCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope='session' )
def A ( _UpperCAmelCase : HfApi , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = F"repo_zipped_txt_data-{int(time.time() * 10E3 )}"
_UpperCAmelCase = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='dataset' , private=_UpperCAmelCase )
hf_api.upload_file(
token=_UpperCAmelCase , path_or_fileobj=str(_UpperCAmelCase ) , path_in_repo='data.zip' , repo_id=_UpperCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( _UpperCAmelCase : str , _UpperCAmelCase : Any , _UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope='session' )
def A ( _UpperCAmelCase : HfApi , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
_UpperCAmelCase = F"repo_zipped_img_data-{int(time.time() * 10E3 )}"
_UpperCAmelCase = F"{CI_HUB_USER}/{repo_name}"
hf_api.create_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='dataset' , private=_UpperCAmelCase )
hf_api.upload_file(
token=_UpperCAmelCase , path_or_fileobj=str(_UpperCAmelCase ) , path_in_repo='data.zip' , repo_id=_UpperCAmelCase , repo_type='dataset' , )
yield repo_id
try:
hf_api.delete_repo(_UpperCAmelCase , token=_UpperCAmelCase , repo_type='dataset' )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def A ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : str ) -> str:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
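
# Usage sketch (assumption): tests depend on these fixtures by name, e.g.:
#
#   def test_load_hub_dataset(hf_private_dataset_repo_txt_data, hf_token):
#       # `hf_private_dataset_repo_txt_data` is the repo_id of a freshly created private repo,
#       # deleted again once the session-scoped fixture is torn down.
#       ...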
| 290
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", "DebertaOnnxConfig"],
"tokenization_deberta": ["DebertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["DebertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
"DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"DebertaForMaskedLM",
"DebertaForQuestionAnswering",
"DebertaForSequenceClassification",
"DebertaForTokenClassification",
"DebertaModel",
"DebertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
"TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDebertaForMaskedLM",
"TFDebertaForQuestionAnswering",
"TFDebertaForSequenceClassification",
"TFDebertaForTokenClassification",
"TFDebertaModel",
"TFDebertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
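
# Note (added): with _LazyModule, `from transformers.models.deberta import DebertaModel` defers
# the heavy torch/tf imports until the attribute is first accessed; _import_structure maps
# submodule names to exported symbols, and the TYPE_CHECKING branch keeps static analyzers and
# IDE completion working without triggering the imports at runtime.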
| 290
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None,
        enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910,
        shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
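
# Usage sketch (assumption):
#
#   config = RoCBertConfig(num_hidden_layers=4, hidden_size=512)   # override any default above
#   config.save_pretrained("./roc-bert-small")                     # writes config.json
#   reloaded = RoCBertConfig.from_pretrained("./roc-bert-small")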
| 155
|
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

rename_keys_prefix = [
    ("bert.bert", "visual_bert"),
    ("bert.cls", "cls"),
    ("bert.classifier", "cls"),
    ("token_type_embeddings_visual", "visual_token_type_embeddings"),
    ("position_embeddings_visual", "visual_position_embeddings"),
    ("projection", "visual_projection"),
]

ACCEPTABLE_CHECKPOINTS = [
    "nlvr2_coco_pre_trained.th",
    "nlvr2_fine_tuned.th",
    "nlvr2_pre_trained.th",
    "vcr_coco_pre_train.th",
    "vcr_fine_tune.th",
    "vcr_pre_train.th",
    "vqa_coco_pre_trained.th",
    "vqa_fine_tuned.th",
    "vqa_pre_trained.th",
]


def load_state_dict(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    return sd


def get_new_dict(d, config, rename_keys_prefix=rename_keys_prefix):
    new_d = OrderedDict()
    new_d["visual_bert.embeddings.position_ids"] = torch.arange(config.max_position_embeddings).expand((1, -1))
    # detector_d = OrderedDict()
    for key in d:
        if "detector" in key:
            # detector_d[key.replace('detector.','')] = d[key]
            continue
        new_key = key
        for name_pair in rename_keys_prefix:
            new_key = new_key.replace(name_pair[0], name_pair[1])
        new_d[new_key] = d[key]
        if key == "bert.cls.predictions.decoder.weight":
            # Old bert code didn't have `decoder.bias`, but was added separately
            new_d["cls.predictions.decoder.bias"] = new_d["cls.predictions.bias"]
    return new_d


@torch.no_grad()
def convert_visual_bert_checkpoint(checkpoint_path, pytorch_dump_folder_path):
    assert (
        checkpoint_path.split("/")[-1] in ACCEPTABLE_CHECKPOINTS
    ), f"The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}."

    # Get Config
    if "pre" in checkpoint_path:
        model_type = "pretraining"
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
        elif "nlvr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 1_024}
        else:
            raise NotImplementedError(f"No implementation found for `{checkpoint_path}`.")
    else:
        if "vcr" in checkpoint_path:
            config_params = {"visual_embedding_dim": 512}
            model_type = "multichoice"
        elif "vqa_advanced" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048}
            model_type = "vqa_advanced"
        elif "vqa" in checkpoint_path:
            config_params = {"visual_embedding_dim": 2_048, "num_labels": 3_129}
            model_type = "vqa"
        elif "nlvr" in checkpoint_path:
            config_params = {
                "visual_embedding_dim": 1_024,
                "num_labels": 2,
            }
            model_type = "nlvr"

    config = VisualBertConfig(**config_params)

    # Load State Dict
    state_dict = load_state_dict(checkpoint_path)
    new_state_dict = get_new_dict(state_dict, config)

    if model_type == "pretraining":
        model = VisualBertForPreTraining(config)
    elif model_type == "vqa":
        model = VisualBertForQuestionAnswering(config)
    elif model_type == "nlvr":
        model = VisualBertForVisualReasoning(config)
    elif model_type == "multichoice":
        model = VisualBertForMultipleChoice(config)

    model.load_state_dict(new_state_dict)

    # Save Checkpoints
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("orig_checkpoint_path", type=str, help="A path to .th on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
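
# Usage sketch (assumption -- the script file name below is illustrative; the checkpoint
# name must be one of ACCEPTABLE_CHECKPOINTS):
#
#   python convert_visual_bert_checkpoint.py vqa_coco_pre_trained.th ./visual_bert_vqa_coco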
| 155
| 1
|
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFInpaintingPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        mask_image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
| 356
|
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = dist[node] if dist[target] == -1 else min(dist[target], dist[node])
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
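
# Worked trace (added): starting from 'G', BFS expands level by level -- {C}, then {A, F},
# then {B, E}, then {D} -- so bfs_shortest_path(demo_graph, 'G', 'D') returns
# ['G', 'C', 'A', 'B', 'D'] and bfs_shortest_path_distance reports 4 edges. Level-order
# exploration is exactly what guarantees a fewest-edge path in an unweighted graph.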
| 217
| 0
|
'''simple docstring'''
from collections.abc import Iterable
from typing import Generic, TypeVar
_T = TypeVar("_T")


class Queue(Generic[_T]):
    def __init__(self, iterable=None) -> None:
        self._stack1: list[_T] = list(iterable or [])
        self._stack2: list[_T] = []

    def __len__(self) -> int:
        return len(self._stack1) + len(self._stack2)

    def __repr__(self) -> str:
        return f"Queue({tuple(self._stack2[::-1] + self._stack1)})"

    def put(self, item: _T) -> None:
        self._stack1.append(item)

    def get(self) -> _T:
        # To reduce attribute look-ups inside the while loop.
        stack1_pop = self._stack1.pop
        stack2_append = self._stack2.append
        if not self._stack2:
            while self._stack1:
                stack2_append(stack1_pop())
        if not self._stack2:
            raise IndexError("Queue is empty")
        return self._stack2.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
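
# Usage sketch (added): each element is appended and popped at most once per stack, so n
# operations cost O(n) total -- amortized O(1) per put/get:
#
#   q = Queue([1, 2, 3])
#   q.put(4)
#   assert q.get() == 1  # FIFO order, despite the two LIFO stacks underneath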
| 37
|
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
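
# Note (added): the invariant under test -- a set of repo filenames is "safetensors
# compatible" when every PyTorch weight file has a matching .safetensors twin for the
# requested variant. A minimal sketch of that per-file rule, assuming this simplified form:
#
#   def has_safetensors_twin(bin_name: str, filenames: set) -> bool:
#       return bin_name.replace(".bin", ".safetensors") in filenames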
| 72
| 0
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_DIFFUSERS = os.path.join(git_repo_path, 'src', 'diffusers')
class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        simple_backend = find_backend("    if not is_torch_available():")
        self.assertEqual(simple_backend, "torch")
        # backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        # self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_torch_available() and is_transformers_available()):")
        self.assertEqual(double_backend, "torch_and_transformers")
        # double_backend_with_underscore = find_backend(
        #     "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        # )
        # self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_torch_available() and is_transformers_available() and is_onnx_available()):"
        )
        self.assertEqual(triple_backend, "torch_and_transformers_and_onnx")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("torch_and_transformers", objects)
        self.assertIn("flax_and_transformers", objects)
        self.assertIn("torch_and_transformers_and_onnx", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("UNet2DModel", objects["torch"])
        self.assertIn("FlaxUNet2DConditionModel", objects["flax"])
        self.assertIn("StableDiffusionPipeline", objects["torch_and_transformers"])
        self.assertIn("FlaxStableDiffusionPipeline", objects["flax_and_transformers"])
        self.assertIn("LMSDiscreteScheduler", objects["torch_and_scipy"])
        self.assertIn("OnnxStableDiffusionPipeline", objects["torch_and_transformers_and_onnx"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = "\nclass FakeClass(metaclass=DummyObject):\n    _backends = 'torch'\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, 'torch')\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, 'torch')\n"
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n    requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n    _backends = [\"torch\"]\n\n    def __init__(self, *args, **kwargs):\n        requires_backends(self, [\"torch\"])\n\n    @classmethod\n    def from_config(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n\n    @classmethod\n    def from_pretrained(cls, *args, **kwargs):\n        requires_backends(cls, [\"torch\"])\n"
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
| 365
|
import itertools
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 10_001) -> int:
    return next(itertools.islice(prime_generator(), nth - 1, nth))
if __name__ == "__main__":
print(F"{solution() = }")
| 329
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A__ : List[str] = logging.get_logger(__name__)
class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self, image_size=2_2_4, patch_size=4, num_channels=3, embed_dim=9_6, depths=[2, 2, 6, 2],
        num_heads=[3, 6, 1_2, 2_4], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False,
        initializer_range=0.02, layer_norm_eps=1e-5, out_features=None, out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
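
# Usage sketch (assumption):
#
#   config = MaskFormerSwinConfig()  # embed_dim=96, depths=[2, 2, 6, 2]
#   config.hidden_size               # == int(96 * 2 ** 3) == 768, the last-stage channel width
#   config.stage_names               # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']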
| 103
|
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, ...]]:
    shapes = []
    if isinstance(tree, dict):
        for v in tree.values():
            shapes.extend(_fetch_dims(v))
    elif isinstance(tree, (list, tuple)):
        for t in tree:
            shapes.extend(_fetch_dims(t))
    elif isinstance(tree, torch.Tensor):
        shapes.append(tree.shape)
    else:
        raise ValueError('Not supported')
    return shapes


@torch.jit.ignore
def _flat_idx_to_idx(flat_idx: int, dims: Tuple[int, ...]) -> Tuple[int, ...]:
    idx = []
    for d in reversed(dims):
        idx.append(flat_idx % d)
        flat_idx = flat_idx // d
    return tuple(reversed(idx))
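
# Worked example (added): _flat_idx_to_idx inverts row-major flattening. For dims == (3, 4)
# and flat_idx == 7: 7 % 4 == 3, then 7 // 4 == 1 and 1 % 3 == 1 -> reversed -> (1, 3),
# i.e. row 1, column 3, matching 1 * 4 + 3 == 7.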
@torch.jit.ignore
def _SCREAMING_SNAKE_CASE ( lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Sequence[int] , lowercase : Optional[Sequence[bool]] = None , lowercase : Optional[Sequence[bool]] = None , ):
'''simple docstring'''
def reduce_edge_list(lowercase : List[bool] ) -> None:
lowerCamelCase_ = True
for i in range(len(lowercase ) ):
lowerCamelCase_ = -1 * (i + 1)
l[reversed_idx] &= tally
lowerCamelCase_ = l[reversed_idx]
if start_edges is None:
lowerCamelCase_ = [s == 0 for s in start]
reduce_edge_list(lowercase )
if end_edges is None:
lowerCamelCase_ = [e == (d - 1) for e, d in zip(lowercase , lowercase )]
reduce_edge_list(lowercase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(lowercase ) == 0:
return [()]
elif len(lowercase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
lowerCamelCase_ = []
lowerCamelCase_ = []
# Dimensions common to start and end can be selected directly
for s, e in zip(lowercase , lowercase ):
if s == e:
path_list.append(slice(lowercase , s + 1 ) )
else:
break
lowerCamelCase_ = tuple(lowercase )
lowerCamelCase_ = len(lowercase )
# start == end, and we're done
if divergence_idx == len(lowercase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase_ = start[divergence_idx]
return tuple(
path + (slice(lowercase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
lowerCamelCase_ = end[divergence_idx]
return tuple(
path + (slice(lowercase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
lowerCamelCase_ = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _chunk_slice(t: torch.Tensor, flat_start: int, flat_end: int, no_batch_dims: int) -> torch.Tensor:
    batch_dims = t.shape[:no_batch_dims]
    start_idx = list(_flat_idx_to_idx(flat_start, batch_dims))
    # _get_minimal_slice_set is inclusive
    end_idx = list(_flat_idx_to_idx(flat_end - 1, batch_dims))
    # Get an ordered list of slices to perform
    slices = _get_minimal_slice_set(
        start_idx, end_idx, batch_dims,
    )
    sliced_tensors = [t[s] for s in slices]
    return torch.cat([s.view((-1,) + t.shape[no_batch_dims:]) for s in sliced_tensors])
def _SCREAMING_SNAKE_CASE ( lowercase : Callable , lowercase : Dict[str, Any] , lowercase : int , lowercase : int , lowercase : bool = False , lowercase : Any = None , lowercase : bool = False , ):
'''simple docstring'''
if not (len(lowercase ) > 0):
raise ValueError('Must provide at least one input' )
lowerCamelCase_ = [shape[:no_batch_dims] for shape in _fetch_dims(lowercase )]
lowerCamelCase_ = tuple([max(lowercase ) for s in zip(*lowercase )] )
def _prep_inputs(lowercase : torch.Tensor ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
lowerCamelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
lowerCamelCase_ = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
lowerCamelCase_ = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
lowerCamelCase_ = tensor_tree_map(_prep_inputs , lowercase )
lowerCamelCase_ = None
if _out is not None:
lowerCamelCase_ = tensor_tree_map(lambda lowercase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
lowerCamelCase_ = 1
for d in orig_batch_dims:
flat_batch_dim *= d
lowerCamelCase_ = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(lowercase : torch.Tensor ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
lowerCamelCase_ = 0
lowerCamelCase_ = prepped_outputs
for _ in range(lowercase ):
# Chunk the input
if not low_mem:
lowerCamelCase_ = _select_chunk
else:
lowerCamelCase_ = partial(
_chunk_slice , flat_start=lowercase , flat_end=min(lowercase , i + chunk_size ) , no_batch_dims=len(lowercase ) , )
lowerCamelCase_ = tensor_tree_map(lowercase , lowercase )
# Run the layer on the chunk
lowerCamelCase_ = layer(**lowercase )
# Allocate space for the output
if out is None:
lowerCamelCase_ = tensor_tree_map(lambda lowercase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , lowercase )
# Put the chunk in its pre-allocated space
if isinstance(lowercase , lowercase ):
def assign(lowercase : dict , lowercase : dict ) -> None:
for k, v in da.items():
if isinstance(lowercase , lowercase ):
assign(lowercase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
lowerCamelCase_ = da[k]
assign(lowercase , lowercase )
elif isinstance(lowercase , lowercase ):
for xa, xa in zip(lowercase , lowercase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
lowerCamelCase_ = xa
elif isinstance(lowercase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
lowerCamelCase_ = output_chunk
else:
raise ValueError('Not supported' )
i += chunk_size
lowerCamelCase_ = tensor_tree_map(lambda lowercase : t.view(orig_batch_dims + t.shape[1:] ) , lowercase )
return out
class A:
'''simple docstring'''
def __init__( self : Optional[Any] , A_ : int = 512 , ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ = max_chunk_size
lowerCamelCase_ = None
lowerCamelCase_ = None
def a__ ( self : int , A_ : Callable , A_ : tuple , A_ : int ) -> int:
"""simple docstring"""
logging.info('Tuning chunk size...' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
lowerCamelCase_ = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
lowerCamelCase_ = [c for c in candidates if c > min_chunk_size]
lowerCamelCase_ = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(A_ : int ) -> bool:
try:
with torch.no_grad():
fn(*A_ , chunk_size=A_ )
return True
except RuntimeError:
return False
lowerCamelCase_ = 0
lowerCamelCase_ = len(A_ ) - 1
while i > min_viable_chunk_size_index:
lowerCamelCase_ = test_chunk_size(candidates[i] )
if not viable:
lowerCamelCase_ = (min_viable_chunk_size_index + i) // 2
else:
lowerCamelCase_ = i
lowerCamelCase_ = (i + len(A_ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def a__ ( self : List[str] , A_ : Iterable , A_ : Iterable ) -> bool:
"""simple docstring"""
lowerCamelCase_ = True
for aa, aa in zip(A_ , A_ ):
assert type(A_ ) == type(A_ )
if isinstance(A_ , (list, tuple) ):
consistent &= self._compare_arg_caches(A_ , A_ )
elif isinstance(A_ , A_ ):
lowerCamelCase_ = [v for _, v in sorted(aa.items() , key=lambda A_ : x[0] )]
lowerCamelCase_ = [v for _, v in sorted(aa.items() , key=lambda A_ : x[0] )]
consistent &= self._compare_arg_caches(A_ , A_ )
else:
consistent &= aa == aa
return consistent
def a__ ( self : int , A_ : Callable , A_ : tuple , A_ : int , ) -> int:
"""simple docstring"""
lowerCamelCase_ = True
lowerCamelCase_ = tree_map(lambda A_ : a.shape if isinstance(A_ , torch.Tensor ) else a , A_ , A_ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(A_ )
lowerCamelCase_ = self._compare_arg_caches(self.cached_arg_data , A_ )
else:
# Otherwise, we can reuse the precomputed value
lowerCamelCase_ = False
if not consistent:
lowerCamelCase_ = self._determine_favorable_chunk_size(
A_ , A_ , A_ , )
lowerCamelCase_ = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
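
# Note (added): chunk_layer trades peak activation memory for loop overhead -- it flattens the
# leading batch dims, runs `layer` on chunk_size-sized slices, and writes each result into a
# pre-allocated output. The tuner class above then searches for the largest power-of-two chunk
# size whose trial run does not raise RuntimeError (the usual CUDA out-of-memory signal),
# reusing the cached answer while the argument shapes stay the same.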
| 204
| 0
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCamelCase_ : Dict = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}')


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 2_5_5,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'shortest_edge': 2_5_6}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size['shortest_edge'], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size['height'], size['width'])
        else:
            raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}')
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}')
        return center_crop(image, size=(size['height'], size['width']), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.')
        if do_center_crop and crop_size is None:
            raise ValueError('Crop size must be specified if do_center_crop is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.')
        if offset and not do_rescale:
            raise ValueError('For offset, do_rescale must also be set to True.')

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name='crop_size')

        if not valid_images(videos):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')

        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
| 197
|
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class Wav2Vec2Processor(ProcessorMixin):
    feature_extractor_class = 'Wav2Vec2FeatureExtractor'
    tokenizer_class = 'AutoTokenizer'

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        try:
            return super().from_pretrained(pretrained_model_name_or_path, **kwargs)
        except OSError:
            warnings.warn(
                f'Loading a tokenizer inside {cls.__name__} from a config that does not'
                ' include a `tokenizer_class` attribute is deprecated and will be '
                'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
                ' attribute to either your `config.json` or `tokenizer_config.json` '
                'file to suppress this warning: ',
                FutureWarning,
            )
            feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(pretrained_model_name_or_path, **kwargs)
            tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(pretrained_model_name_or_path, **kwargs)
            return cls(feature_extractor=feature_extractor, tokenizer=tokenizer)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        if 'raw_speech' in kwargs:
            warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.')
            audio = kwargs.pop('raw_speech')
        else:
            audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')
        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def pad(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor.pad(*args, **kwargs)
        input_features = kwargs.pop('input_features', None)
        labels = kwargs.pop('labels', None)
        if len(args) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features, *args, **kwargs)
        if labels is not None:
            labels = self.tokenizer.pad(labels, **kwargs)
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features['labels'] = labels['input_ids']
            return input_features

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
            'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
            'your audio inputs, or in a separate call.)'
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
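# Hedged usage sketch for Wav2Vec2Processor above (the checkpoint name is
# illustrative and downloading it requires network access):
if __name__ == "__main__":
    import numpy as np

    processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
    waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    inputs = processor(audio=waveform, sampling_rate=16_000, text="hello world", return_tensors="pt")
    # Audio features land in inputs["input_values"]; the tokenized transcript
    # was attached as inputs["labels"] by __call__ above.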
| 197
| 1
|
"""simple docstring"""
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2
def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four quantities is passed as 0."""
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')
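# Worked example: two 1 C charges 1 m apart feel |F| = k * |q1*q2| / d**2 = 8.988e9 N.
assert couloumbs_law(force=0, charge1=1, charge2=1, distance=1) == {"force": 8.988e9}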
if __name__ == "__main__":
import doctest
doctest.testmod()
| 290
|
"""simple docstring"""
def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    """Convert a snake_case string to camelCase (or PascalCase when use_pascal=True)."""
    if not isinstance(input_str, str):
        msg = f'Expected string as input, found {type(input_str)}'
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f'Expected boolean as use_pascal parameter, found {type(use_pascal)}'
        raise ValueError(msg)
    words = input_str.split('_')
    start_index = 0 if use_pascal else 1
    words_to_capitalize = words[start_index:]
    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]
    initial_word = '' if use_pascal else words[0]
    return "".join([initial_word, *capitalized_words])
if __name__ == "__main__":
from doctest import testmod
testmod()
| 290
| 1
|
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
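# Hedged usage sketch: parsing a serialized SentencePiece model with the
# generated classes above (the file path is illustrative):
# m = ModelProto()
# with open("spiece.model", "rb") as f:
#     m.ParseFromString(f.read())
# print(m.trainer_spec.vocab_size)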
| 207
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_fnet': ['FNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FNetConfig']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_fnet'] = ['FNetTokenizer']
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_fnet_fast'] = ['FNetTokenizerFast']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_fnet'] = [
        'FNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'FNetForMaskedLM',
        'FNetForMultipleChoice',
        'FNetForNextSentencePrediction',
        'FNetForPreTraining',
        'FNetForQuestionAnswering',
        'FNetForSequenceClassification',
        'FNetForTokenClassification',
        'FNetLayer',
        'FNetModel',
        'FNetPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet import FNetTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_fnet_fast import FNetTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_fnet import (
            FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            FNetForMaskedLM,
            FNetForMultipleChoice,
            FNetForNextSentencePrediction,
            FNetForPreTraining,
            FNetForQuestionAnswering,
            FNetForSequenceClassification,
            FNetForTokenClassification,
            FNetLayer,
            FNetModel,
            FNetPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 207
| 1
|
'''simple docstring'''
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Sort input_list[low:high+1] by merging its two already-sorted halves in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list
def iter_merge_sort(input_list: list) -> list:
    """Return a sorted copy of input_list using iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)
    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2
    return input_list
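# Quick check for iter_merge_sort above (the input is copied, then sorted):
assert iter_merge_sort([5, 9, 8, 7, 1, 2, 7]) == [1, 2, 5, 7, 7, 8, 9]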
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 67
|
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
    RemBertTokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
},
"tokenizer_file": {
"google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/rembert": 256,
}
SPIECE_UNDERLINE = "▁"
class RemBertTokenizerFast(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer, backed by HuggingFace's tokenizers library."""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, remove_space=True, keep_accents=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="<unk>", sep_token="[SEP]", pad_token="<pad>", cls_token="[CLS]", mask_token="[MASK]", **kwargs):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, remove_space=remove_space, keep_accents=keep_accents, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, **kwargs)
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    "You should not supply a second sequence if the provided sequence of "
                    "ids is already formatted with special tokens for the model.")
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        out_vocab_file = os.path.join(save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
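# Runnable illustration of the special-token layout produced by the methods
# above, using placeholder ids (101/102 are illustrative, not RemBERT's real ids):
cls, sep = [101], [102]
token_ids_0, token_ids_1 = [7, 8], [9]
assert cls + token_ids_0 + sep + token_ids_1 + sep == [101, 7, 8, 102, 9, 102]
# token_type_ids: zeros over the first segment (incl. cls/sep), ones over the second
assert len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] == [0, 0, 0, 0, 1, 1]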
| 217
| 0
|
'''simple docstring'''
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Infer a parquet writer batch size from the feature types, or None for the default."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
    return None if batch_size is np.inf else batch_size
class ParquetDatasetReader(AbstractDatasetReader):
    def __init__(self, path_or_paths, split=None, features=None, cache_dir=None, keep_in_memory=False, streaming=False, num_proc=None, **kwargs):
        super().__init__(path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(cache_dir=cache_dir, data_files=path_or_paths, features=features, hash=hash, **kwargs)

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset


class ParquetDatasetWriter:
    def __init__(self, dataset, path_or_buf, batch_size=None, **parquet_writer_kwargs):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj, batch_size, **parquet_writer_kwargs) -> int:
        """Write the dataset's Arrow table to file_obj as Parquet; return bytes written."""
        written = 0
        parquet_writer_kwargs.pop("path_or_buf", None)  # not a pyarrow argument
        schema = self.dataset.features.arrow_schema
        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)
        for offset in logging.tqdm(range(0, len(self.dataset), batch_size), unit="ba", disable=not logging.is_progress_bar_enabled(), desc="Creating parquet from Arrow format"):
            batch = query_table(table=self.dataset._data, key=slice(offset, offset + batch_size), indices=self.dataset._indices if self.dataset._indices is not None else None)
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
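# Hedged usage sketch for the reader/writer above (paths illustrative; in
# practice these classes back `Dataset.from_parquet` / `Dataset.to_parquet`):
# ds = ParquetDatasetReader("data.parquet", split="train").read()
# ParquetDatasetWriter(ds, "copy.parquet", batch_size=1000).write()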
| 365
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_plbart': ['PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PLBartConfig']}
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['tokenization_plbart'] = ['PLBartTokenizer']
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_plbart'] = [
        'PLBART_PRETRAINED_MODEL_ARCHIVE_LIST',
        'PLBartForCausalLM',
        'PLBartForConditionalGeneration',
        'PLBartForSequenceClassification',
        'PLBartModel',
        'PLBartPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_plbart import PLBartTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_plbart import (
            PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            PLBartForCausalLM,
            PLBartForConditionalGeneration,
            PLBartForSequenceClassification,
            PLBartModel,
            PLBartPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 142
| 0
|
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCamelCase_ : List[Any] = logging.get_logger(__name__)
class __A ( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
__lowerCAmelCase = ["pixel_values"]
def __init__( self , __A = True , __A = None , __A = PILImageResampling.BICUBIC , __A = True , __A = True , __A = 1 / 255 , __A = None , __A = True , __A = None , __A = None , **__A , ) -> None:
super().__init__(**__A )
a =size if size is not None else {'''height''': 224, '''width''': 224}
a =get_size_dict(__A )
a =crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
a =get_size_dict(__A , default_to_square=__A , param_name='''crop_size''' )
a =do_resize
a =do_rescale
a =do_normalize
a =do_center_crop
a =crop_size
a =size
a =resample
a =rescale_factor
a =image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a =image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = PILImageResampling.BILINEAR , __A = None , **__A , ) -> np.ndarray:
a =get_size_dict(__A )
if "shortest_edge" in size:
a =get_resize_output_image_size(__A , size=size['''shortest_edge'''] , default_to_square=__A )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
a =(size['''height'''], size['''width'''])
else:
raise ValueError(f'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A , ) -> np.ndarray:
a =get_size_dict(__A )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(__A , size=(size['''height'''], size['''width''']) , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A = None , **__A ) -> np.ndarray:
return rescale(__A , scale=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A , __A = None , **__A , ) -> np.ndarray:
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def SCREAMING_SNAKE_CASE ( self , __A , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = None , __A = ChannelDimension.FIRST , **__A , ) -> BatchFeature:
a =do_resize if do_resize is not None else self.do_resize
a =do_rescale if do_rescale is not None else self.do_rescale
a =do_normalize if do_normalize is not None else self.do_normalize
a =do_center_crop if do_center_crop is not None else self.do_center_crop
a =crop_size if crop_size is not None else self.crop_size
a =get_size_dict(__A , param_name='''crop_size''' , default_to_square=__A )
a =resample if resample is not None else self.resample
a =rescale_factor if rescale_factor is not None else self.rescale_factor
a =image_mean if image_mean is not None else self.image_mean
a =image_std if image_std is not None else self.image_std
a =size if size is not None else self.size
a =get_size_dict(__A )
if not is_batched(__A ):
a =[images]
if not valid_images(__A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
# All transformations expect numpy arrays.
a =[to_numpy_array(__A ) for image in images]
if do_resize:
a =[self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
a =[self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
a =[self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
a =[self.normalize(image=__A , mean=__A , std=__A ) for image in images]
a =[to_channel_dimension_format(__A , __A ) for image in images]
a ={'''pixel_values''': images}
return BatchFeature(data=__A , tensor_type=__A )
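# Hedged usage sketch for the image processor above (its class name is elided
# in this snippet, and the entry point is named `preprocess` upstream; any
# instance applies resize -> center-crop -> rescale -> normalize in that order):
# import numpy as np
# image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
# batch = processor.preprocess(image, return_tensors="np")
# batch["pixel_values"].shape  # (1, 3, 224, 224) with the default 224x224 crop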
| 81
|
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = process
_UpperCAmelCase = params
def __len__( self ) -> Union[str, Any]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
_UpperCAmelCase = self.dataset[i]
_UpperCAmelCase = self.process(_SCREAMING_SNAKE_CASE , **self.params )
return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = loader
_UpperCAmelCase = infer
_UpperCAmelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCAmelCase = None
_UpperCAmelCase = loader_batch_size
# Internal bookkeeping
_UpperCAmelCase = None
_UpperCAmelCase = None
def __len__( self ) -> Any:
"""simple docstring"""
return len(self.loader )
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
_UpperCAmelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCAmelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Convert ModelOutput to tuple first
_UpperCAmelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
_UpperCAmelCase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
_UpperCAmelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCAmelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
_UpperCAmelCase = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCAmelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCAmelCase = self._loader_batch_data.__class__(_SCREAMING_SNAKE_CASE )
self._loader_batch_index += 1
return result
def UpperCAmelCase__ ( self ) -> List[str]:
"""simple docstring"""
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCAmelCase = next(self.iterator )
_UpperCAmelCase = self.infer(_SCREAMING_SNAKE_CASE , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCAmelCase = processed
_UpperCAmelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
"""simple docstring"""
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __iter__( self ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
_UpperCAmelCase = None
return self
def UpperCAmelCase__ ( self ) -> int:
"""simple docstring"""
if self.subiterator is None:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
_UpperCAmelCase = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item:
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated over.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
_UpperCAmelCase = next(self.subiterator )
return processed
class __a ( UpperCAmelCase ):
def __iter__( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = iter(self.loader )
return self
def UpperCAmelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = False
_UpperCAmelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
while not is_last:
_UpperCAmelCase = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(_SCREAMING_SNAKE_CASE , torch.Tensor ):
_UpperCAmelCase = processed
else:
_UpperCAmelCase = list(processed.keys() )[0]
_UpperCAmelCase = processed[key]
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCAmelCase = observed_batch_size
_UpperCAmelCase = processed
_UpperCAmelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCAmelCase = self.loader_batch_item()
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
if is_last:
return accumulator
else:
_UpperCAmelCase = processed
_UpperCAmelCase = item.pop('is_last' )
accumulator.append(_SCREAMING_SNAKE_CASE )
return accumulator
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = key
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
return self.dataset[i][self.key]
class __a ( UpperCAmelCase ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = dataset
_UpperCAmelCase = keya
_UpperCAmelCase = keya
def __len__( self ) -> Optional[int]:
"""simple docstring"""
return len(self.dataset )
def __getitem__( self , _SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
| 329
| 0
|
"""simple docstring"""
from ..models.auto import AutoModelForSeq2SeqLM, AutoTokenizer
from .base import PipelineTool
LANGUAGE_CODES = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class TranslationTool(PipelineTool):
    """Translates text between the languages listed in LANGUAGE_CODES."""

    default_checkpoint = 'facebook/nllb-200-distilled-600M'
    description = (
        'This is a tool that translates text from a language to another. It takes three inputs: `text`, which should '
        'be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, '
        'which should be the language for the desired output language. Both `src_lang` and `tgt_lang` are written in '
        'plain English, such as \'Romanian\', or \'Albanian\'. It returns the text translated in `tgt_lang`.'
    )
    name = 'translator'
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSeq2SeqLM
    lang_to_code = LANGUAGE_CODES
    inputs = ['text', 'text', 'text']
    outputs = ['text']

    def encode(self, text, src_lang, tgt_lang):
        if src_lang not in self.lang_to_code:
            raise ValueError(f'{src_lang} is not a supported language.')
        if tgt_lang not in self.lang_to_code:
            raise ValueError(f'{tgt_lang} is not a supported language.')
        src_lang = self.lang_to_code[src_lang]
        tgt_lang = self.lang_to_code[tgt_lang]
        return self.pre_processor._build_translation_inputs(
            text, return_tensors='pt', src_lang=src_lang, tgt_lang=tgt_lang)

    def forward(self, inputs):
        return self.model.generate(**inputs)

    def decode(self, outputs):
        return self.post_processor.decode(outputs[0].tolist(), skip_special_tokens=True)
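# Hedged usage sketch for TranslationTool above (language names are the
# plain-English keys of LANGUAGE_CODES; the checkpoint download needs network):
# tool = TranslationTool()
# print(tool("How are you?", src_lang="English", tgt_lang="French"))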
| 357
|
"""simple docstring"""
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()
X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
def euclidean_distance(a, b):
    """Euclidean distance between two points given as array-likes."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by majority vote among its k nearest training points."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
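# Worked example for euclidean_distance above: points (0, 0) and (3, 4) are
# 5.0 apart, since sqrt(3**2 + 4**2) = 5.
assert euclidean_distance([0, 0], [3, 4]) == 5.0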
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
| 153
| 0
|
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word given as a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
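# Quick check for get_pairs above: BPE treats a word as a tuple of symbols.
assert get_pairs(("l", "o", "w", "</w>")) == {("l", "o"), ("o", "w"), ("w", "</w>")}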
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , __lowerCAmelCase , __lowerCAmelCase="<s>" , __lowerCAmelCase="<pad>" , __lowerCAmelCase="</s>" , __lowerCAmelCase="<unk>" , __lowerCAmelCase=False , __lowerCAmelCase=None , **__lowerCAmelCase , ):
"""simple docstring"""
super().__init__(
unk_token=__lowerCAmelCase , bos_token=__lowerCAmelCase , eos_token=__lowerCAmelCase , pad_token=__lowerCAmelCase , do_lower_case=__lowerCAmelCase , **__lowerCAmelCase , )
lowercase = do_lower_case
with open(__lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
lowercase = json.load(__lowerCAmelCase )
lowercase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'No merges files provided. {self.__class__.__name__} can only be used for decoding.' )
lowercase = None
lowercase = None
else:
with open(__lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
lowercase = merges_handle.read().split("""\n""" )[:-1]
lowercase = [tuple(merge.split()[:2] ) for merge in merges]
lowercase = dict(zip(__lowerCAmelCase , range(len(__lowerCAmelCase ) ) ) )
lowercase = {}
@property
def A__ ( self ):
"""simple docstring"""
return len(self.decoder )
def A__ ( self ):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowercase = get_pairs(__lowerCAmelCase )
if not pairs:
return token
while True:
lowercase = min(__lowerCAmelCase , key=lambda __lowerCAmelCase : self.bpe_ranks.get(__lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase = bigram
lowercase = []
lowercase = 0
while i < len(__lowerCAmelCase ):
try:
lowercase = word.index(__lowerCAmelCase , __lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase = j
if word[i] == first and i < len(__lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase = tuple(__lowerCAmelCase )
lowercase = new_word
if len(__lowerCAmelCase ) == 1:
break
else:
lowercase = get_pairs(__lowerCAmelCase )
lowercase = """ """.join(__lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
lowercase = """\n""" + BPE_TOKEN_MERGES
if word.endswith(__lowerCAmelCase ):
lowercase = word.replace(__lowerCAmelCase , """""" )
lowercase = word.replace(""" """ , __lowerCAmelCase )
lowercase = word
return word
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
lowercase = text.lower()
lowercase = text.split()
lowercase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
return self.encoder.get(__lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = self.decoder.get(__lowerCAmelCase , self.unk_token )
return result
def A__ ( self , __lowerCAmelCase ):
"""simple docstring"""
lowercase = """ """.join(__lowerCAmelCase )
# make sure @@ tokens are concatenated
lowercase = """""".join(string.split(__lowerCAmelCase ) )
return string
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase = None ):
"""simple docstring"""
if not os.path.isdir(__lowerCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowercase = os.path.join(
__lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCAmelCase , ensure_ascii=__lowerCAmelCase ) + """\n""" )
lowercase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
lowercase = token_index
writer.write(""" """.join(__lowerCAmelCase ) + """\n""" )
index += 1
return (vocab_file, merges_file)
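# Runnable illustration of the "@@ " continuation convention handled by the
# decoding helper above: subword pieces carry a trailing "@@" marker, and
# removing "@@ " after joining stitches the words back together.
tokens = ["hel@@", "lo", "wor@@", "ld"]
assert " ".join(tokens).replace("@@ ", "") == "hello world"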
| 197
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256 px
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
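# Hedged usage sketch for the watermarker above (tensor shape illustrative;
# images narrower than 256 px are returned unchanged by design):
# wm = StableDiffusionXLWatermarker()
# images = torch.zeros(1, 3, 512, 512)  # batch of images in [-1, 1]
# watermarked = wm.apply_watermark(images)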
| 197
| 1
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class IBertConfig(PretrainedConfig):
    model_type = "ibert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", quant_mode=False, force_dequant="none", **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant


class IBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ])
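# Hedged usage sketch: the defaults above describe a BERT-base-sized encoder,
# with I-BERT's quantization switches added on top:
# config = IBertConfig(quant_mode=True)   # run the model with quantized ops
# config.force_dequant == "none"          # keep all quantized ops quantized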
| 365
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCamelCase ( unittest.TestCase ):
def _lowercase (self : Union[str, Any]) -> Optional[int]:
__snake_case : Optional[Any] = 0
def _lowercase (self : Tuple) -> int:
__snake_case : Optional[Any] = AutoImageProcessor.from_pretrained('openai/clip-vit-base-patch32')
self.assertIsInstance(_A , _A)
def _lowercase (self : str) -> List[str]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : List[str] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : Any) -> Optional[int]:
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Any = Path(_A) / 'preprocessor_config.json'
__snake_case : List[Any] = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : List[Any]) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : str = CLIPConfig()
            # Create a dummy config file with image_processor_type
__snake_case : List[Any] = Path(_A) / 'preprocessor_config.json'
__snake_case : Optional[Any] = Path(_A) / 'config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
__snake_case : List[str] = AutoImageProcessor.from_pretrained(_A).to_dict()
config_dict.pop('image_processor_type')
__snake_case : Optional[int] = CLIPImageProcessor(**_A)
# save in new folder
model_config.save_pretrained(_A)
config.save_pretrained(_A)
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A)
# make sure private variable is not incorrectly saved
__snake_case : int = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(_A , _A)
def _lowercase (self : Union[str, Any]) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : int = Path(_A) / 'preprocessor_config.json'
json.dump(
{'image_processor_type': 'CLIPImageProcessor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
__snake_case : List[str] = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
def _lowercase (self : Optional[int]) -> Dict:
with self.assertRaisesRegex(
_A , 'clip-base is not a local folder and is not a valid model identifier'):
__snake_case : Tuple = AutoImageProcessor.from_pretrained('clip-base')
def _lowercase (self : str) -> int:
with self.assertRaisesRegex(
_A , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
__snake_case : str = AutoImageProcessor.from_pretrained(_A , revision='aaaaaa')
def _lowercase (self : List[Any]) -> str:
with self.assertRaisesRegex(
_A , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
__snake_case : List[Any] = AutoImageProcessor.from_pretrained('hf-internal-testing/config-no-model')
def _lowercase (self : Optional[int]) -> List[str]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_A):
__snake_case : Any = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(_A):
__snake_case : Tuple = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
__snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(_A , trust_remote_code=_A)
self.assertEqual(reloaded_image_processor.__class__.__name__ , 'NewImageProcessor')
def _lowercase (self : int) -> Optional[int]:
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_A):
AutoImageProcessor.register(_A , _A)
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case : Tuple = Path(_A) / 'preprocessor_config.json'
__snake_case : Dict = Path(_A) / 'config.json'
json.dump(
{'feature_extractor_type': 'CLIPFeatureExtractor', 'processor_class': 'CLIPProcessor'} , open(_A , 'w') , )
json.dump({'model_type': 'clip'} , open(_A , 'w'))
__snake_case : Tuple = CustomImageProcessor.from_pretrained(_A)
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_A)
__snake_case : Tuple = AutoImageProcessor.from_pretrained(_A)
self.assertIsInstance(_A , _A)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : List[Any]) -> Tuple:
class UpperCamelCase ( lowercase ):
UpperCAmelCase : str = True
try:
AutoConfig.register('custom' , _A)
AutoImageProcessor.register(_A , _A)
# If remote code is not set, the default is to use local
__snake_case : Tuple = AutoImageProcessor.from_pretrained('hf-internal-testing/test_dynamic_image_processor')
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote code is disabled, we load the local one.
__snake_case : Optional[int] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(image_processor.is_local)
# If remote is enabled, we load from the Hub
__snake_case : List[Any] = AutoImageProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_image_processor' , trust_remote_code=_A)
self.assertEqual(image_processor.__class__.__name__ , 'NewImageProcessor')
self.assertTrue(not hasattr(_A , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
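
# The registration pattern these tests exercise, reduced to a standalone sketch.
# MyConfig / MyImageProcessor are illustrative names, not the test helpers above.
if __name__ == "__main__":
    from transformers import AutoConfig, PretrainedConfig
    from transformers.image_processing_utils import BaseImageProcessor

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyImageProcessor(BaseImageProcessor):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoImageProcessor.register(MyConfig, MyImageProcessor)
    # From here on, AutoImageProcessor.from_pretrained resolves MyImageProcessor
    # for any checkpoint whose config declares model_type == "my-model".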
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class _UpperCAmelCase ( A__ ):
"""simple docstring"""
lowercase__ = ["""pixel_values"""]
def __init__( self : Tuple, lowerCamelCase : bool = True, lowerCamelCase : Dict[str, int] = None, lowerCamelCase : PILImageResampling = PIL.Image.BICUBIC, lowerCamelCase : bool = True, lowerCamelCase : Dict[str, int] = None, lowerCamelCase : Union[int, float] = 1 / 255, lowerCamelCase : bool = True, lowerCamelCase : bool = True, lowerCamelCase : Optional[Union[float, List[float]]] = None, lowerCamelCase : Optional[Union[float, List[float]]] = None, **lowerCamelCase : Tuple, ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
lowercase__ = size if size is not None else {'''height''': 256, '''width''': 256}
lowercase__ = get_size_dict(lowerCamelCase )
lowercase__ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase__ = get_size_dict(lowerCamelCase, param_name='''crop_size''' )
lowercase__ = do_resize
lowercase__ = size
lowercase__ = resample
lowercase__ = do_center_crop
lowercase__ = crop_size
lowercase__ = do_rescale
lowercase__ = rescale_factor
lowercase__ = do_normalize
lowercase__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase__ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : Dict[str, int], lowerCamelCase : PILImageResampling = PIL.Image.BICUBIC, lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : str, ):
'''simple docstring'''
lowercase__ = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return resize(
lowerCamelCase, size=(size['''height'''], size['''width''']), resample=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : List[Any], lowerCamelCase : np.ndarray, lowerCamelCase : Dict[str, int], lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : List[Any], ):
'''simple docstring'''
lowercase__ = get_size_dict(lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCamelCase, size=(size['''height'''], size['''width''']), data_format=lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : Union[int, float], lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : Optional[int], ):
'''simple docstring'''
return rescale(lowerCamelCase, scale=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : np.ndarray, lowerCamelCase : Union[float, List[float]], lowerCamelCase : Union[float, List[float]], lowerCamelCase : Optional[Union[str, ChannelDimension]] = None, **lowerCamelCase : Union[str, Any], ):
'''simple docstring'''
return normalize(lowerCamelCase, mean=lowerCamelCase, std=lowerCamelCase, data_format=lowerCamelCase, **lowerCamelCase )
def lowercase__ ( self : int, lowerCamelCase : ImageInput, lowerCamelCase : bool = None, lowerCamelCase : Dict[str, int] = None, lowerCamelCase : Union[str, Any]=None, lowerCamelCase : bool = None, lowerCamelCase : Dict[str, int] = None, lowerCamelCase : bool = None, lowerCamelCase : float = None, lowerCamelCase : bool = None, lowerCamelCase : Optional[Union[float, List[float]]] = None, lowerCamelCase : Optional[Union[float, List[float]]] = None, lowerCamelCase : Optional[Union[str, TensorType]] = None, lowerCamelCase : ChannelDimension = ChannelDimension.FIRST, **lowerCamelCase : Any, ):
'''simple docstring'''
lowercase__ = do_resize if do_resize is not None else self.do_resize
lowercase__ = resample if resample is not None else self.resample
lowercase__ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase__ = do_rescale if do_rescale is not None else self.do_rescale
lowercase__ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase__ = do_normalize if do_normalize is not None else self.do_normalize
lowercase__ = image_mean if image_mean is not None else self.image_mean
lowercase__ = image_std if image_std is not None else self.image_std
lowercase__ = size if size is not None else self.size
lowercase__ = get_size_dict(lowerCamelCase )
lowercase__ = crop_size if crop_size is not None else self.crop_size
lowercase__ = get_size_dict(lowerCamelCase, param_name='''crop_size''' )
lowercase__ = make_list_of_images(lowerCamelCase )
if not valid_images(lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase__ = [to_numpy_array(lowerCamelCase ) for image in images]
if do_resize:
lowercase__ = [self.resize(image=lowerCamelCase, size=lowerCamelCase, resample=lowerCamelCase ) for image in images]
if do_center_crop:
lowercase__ = [self.center_crop(image=lowerCamelCase, size=lowerCamelCase ) for image in images]
if do_rescale:
lowercase__ = [self.rescale(image=lowerCamelCase, scale=lowerCamelCase ) for image in images]
if do_normalize:
lowercase__ = [self.normalize(image=lowerCamelCase, mean=lowerCamelCase, std=lowerCamelCase ) for image in images]
lowercase__ = [to_channel_dimension_format(lowerCamelCase, lowerCamelCase ) for image in images]
lowercase__ = {'''pixel_values''': images}
return BatchFeature(data=lowerCamelCase, tensor_type=lowerCamelCase )
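
# For intuition, the rescale-then-normalize steps above are elementary array math;
# a standalone check with plain numpy (toy values; per-channel mean/std of 0.5 as
# in IMAGENET_STANDARD_MEAN / IMAGENET_STANDARD_STD):
if __name__ == "__main__":
    image = np.array([[[0, 128, 255]] * 2] * 2, dtype=np.uint8)  # toy 2x2 RGB image
    rescaled = image.astype(np.float32) * (1 / 255)  # rescale: [0, 255] -> [0.0, 1.0]
    normalized = (rescaled - 0.5) / 0.5  # normalize: (x - mean) / std per channel
    print(normalized[0, 0])  # [-1.0, ~0.0039, 1.0]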
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    """simple docstring"""

    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetVaModelTester:
"""simple docstring"""
def __init__( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Optional[Any]=13, lowerCamelCase : List[str]=3, lowerCamelCase : List[str]=32, lowerCamelCase : Union[str, Any]=0.25, lowerCamelCase : int=8, lowerCamelCase : Dict=True, lowerCamelCase : Optional[int]=1_024, lowerCamelCase : List[str]=32, lowerCamelCase : Optional[int]="relu6", lowerCamelCase : Optional[int]=0.1, lowerCamelCase : Optional[Any]=0.02, lowerCamelCase : List[Any]=True, lowerCamelCase : Any=True, lowerCamelCase : Dict=10, lowerCamelCase : Optional[int]=None, ):
'''simple docstring'''
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = num_channels
lowercase__ = image_size
lowercase__ = depth_multiplier
lowercase__ = min_depth
lowercase__ = tf_padding
lowercase__ = int(last_hidden_size * depth_multiplier )
lowercase__ = output_stride
lowercase__ = hidden_act
lowercase__ = classifier_dropout_prob
lowercase__ = use_labels
lowercase__ = is_training
lowercase__ = num_labels
lowercase__ = initializer_range
lowercase__ = scope
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase__ = None
lowercase__ = None
if self.use_labels:
lowercase__ = ids_tensor([self.batch_size], self.num_labels )
lowercase__ = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels )
lowercase__ = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : List[str] ):
'''simple docstring'''
return MobileNetVaConfig(
num_channels=self.num_channels, image_size=self.image_size, depth_multiplier=self.depth_multiplier, min_depth=self.min_depth, tf_padding=self.tf_padding, hidden_act=self.hidden_act, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, )
def lowercase__ ( self : List[str], lowerCamelCase : List[str], lowerCamelCase : Union[str, Any], lowerCamelCase : Tuple, lowerCamelCase : Tuple ):
'''simple docstring'''
lowercase__ = MobileNetVaModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase )
self.parent.assertEqual(
result.last_hidden_state.shape, (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
), )
def lowercase__ ( self : str, lowerCamelCase : str, lowerCamelCase : List[str], lowerCamelCase : List[Any], lowerCamelCase : str ):
'''simple docstring'''
lowercase__ = self.num_labels
lowercase__ = MobileNetVaForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowercase__ = model(lowerCamelCase, labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
lowercase__ = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowercase__ = (
{"""feature-extraction""": MobileNetVaModel, """image-classification""": MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = False
    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)
def lowercase__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def lowercase__ ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def lowercase__ ( self : Optional[int] ):
'''simple docstring'''
pass
def lowercase__ ( self : Dict ):
'''simple docstring'''
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = model_class(lowerCamelCase )
lowercase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase__ = [*signature.parameters.keys()]
lowercase__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], lowerCamelCase )
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def lowercase__ ( self : List[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : str, lowerCamelCase : Optional[int], lowerCamelCase : List[str] ):
lowercase__ = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
lowercase__ = model(**self._prepare_for_class(lowerCamelCase, lowerCamelCase ) )
lowercase__ = outputs.hidden_states
lowercase__ = 26
self.assertEqual(len(lowerCamelCase ), lowerCamelCase )
lowercase__ , lowercase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase__ = True
check_hidden_states_output(lowerCamelCase, lowerCamelCase, lowerCamelCase )
def lowercase__ ( self : Any ):
'''simple docstring'''
lowercase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
@slow
def lowercase__ ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase__ = MobileNetVaModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def prepare_img():
    '''simple docstring'''
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@cached_property
def lowercase__ ( self : str ):
'''simple docstring'''
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : int ):
'''simple docstring'''
lowercase__ = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCamelCase )
lowercase__ = self.default_image_processor
lowercase__ = prepare_img()
lowercase__ = image_processor(images=lowerCamelCase, return_tensors='''pt''' ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowercase__ = model(**lowerCamelCase )
# verify the logits
lowercase__ = torch.Size((1, 1_001) )
self.assertEqual(outputs.logits.shape, lowerCamelCase )
lowercase__ = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3], lowerCamelCase, atol=1E-4 ) )
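
# The same integration check outside the test harness, as a standalone sketch.
# (The dump's `MobileNetVa*` names correspond to the MobileNetV1* classes upstream.)
if __name__ == "__main__":
    from transformers import MobileNetV1ForImageClassification, MobileNetV1ImageProcessor

    processor = MobileNetV1ImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
    model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
    inputs = processor(images=prepare_img(), return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    print(logits.argmax(-1).item())  # predicted class id (1001-way, incl. background)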
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    '''simple docstring'''
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validity check: bytes must decode as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    '''simple docstring'''
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
def multiplicative_persistence(num: int) -> int:
    """
    Return the number of times the digits of `num` must be multiplied
    together until a single digit remains.
    >>> multiplicative_persistence(217)
    2
    """
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]
        num_string = str(total)
        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    """
    Return the number of times the digits of `num` must be summed
    until a single digit remains.
    >>> additive_persistence(199)
    3
    """
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")
    steps = 0
    num_string = str(num)
    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]
        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]
        num_string = str(total)
        steps += 1
    return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class MPNetModelTester:
"""simple docstring"""
def __init__( self :str , lowercase_ :str , lowercase_ :int=13 , lowercase_ :Optional[Any]=7 , lowercase_ :List[Any]=True , lowercase_ :List[Any]=True , lowercase_ :List[str]=False , lowercase_ :Optional[Any]=True , lowercase_ :List[Any]=99 , lowercase_ :List[str]=64 , lowercase_ :int=5 , lowercase_ :List[str]=4 , lowercase_ :Any=64 , lowercase_ :int="gelu" , lowercase_ :Optional[int]=0.1 , lowercase_ :Union[str, Any]=0.1 , lowercase_ :Union[str, Any]=5_12 , lowercase_ :List[Any]=16 , lowercase_ :Optional[Any]=2 , lowercase_ :str=0.02 , lowercase_ :Any=3 , lowercase_ :Tuple=4 , lowercase_ :Optional[Any]=None , ) -> int:
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = seq_length
UpperCAmelCase = is_training
UpperCAmelCase = use_input_mask
UpperCAmelCase = use_token_type_ids
UpperCAmelCase = use_labels
UpperCAmelCase = vocab_size
UpperCAmelCase = hidden_size
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = num_attention_heads
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = max_position_embeddings
UpperCAmelCase = type_vocab_size
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = initializer_range
UpperCAmelCase = num_labels
UpperCAmelCase = num_choices
UpperCAmelCase = scope
def UpperCAmelCase__ ( self :Union[str, Any] ) -> Union[str, Any]:
return MPNetConfig.from_pretrained('microsoft/mpnet-base' )
def UpperCAmelCase__ ( self :Tuple ) -> int:
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase = None
if self.use_input_mask:
UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase = None
UpperCAmelCase = None
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
UpperCAmelCase = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self :List[str] ) -> List[str]:
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[int] , lowercase_ :List[Any] , lowercase_ :Tuple , lowercase_ :Union[str, Any] , lowercase_ :str , lowercase_ :Any ) -> Any:
UpperCAmelCase = MPNetModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , lowercase_ )
UpperCAmelCase = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self :Dict , lowercase_ :str , lowercase_ :Any , lowercase_ :Any , lowercase_ :Union[str, Any] , lowercase_ :str , lowercase_ :int ) -> int:
UpperCAmelCase = MPNetForQuestionAnswering(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , start_positions=lowercase_ , end_positions=lowercase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self :Dict , lowercase_ :str , lowercase_ :List[Any] , lowercase_ :Optional[int] , lowercase_ :Dict , lowercase_ :List[Any] , lowercase_ :List[str] ) -> Optional[int]:
UpperCAmelCase = self.num_labels
UpperCAmelCase = MPNetForSequenceClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :List[str] , lowercase_ :Any , lowercase_ :Union[str, Any] , lowercase_ :Dict , lowercase_ :Optional[Any] , lowercase_ :Optional[int] , lowercase_ :Optional[int] ) -> Any:
UpperCAmelCase = self.num_choices
UpperCAmelCase = MPNetForMultipleChoice(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
UpperCAmelCase = model(
lowercase_ , attention_mask=lowercase_ , labels=lowercase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Tuple , lowercase_ :str , lowercase_ :Tuple , lowercase_ :Any , lowercase_ :str , lowercase_ :Union[str, Any] ) -> Dict:
UpperCAmelCase = self.num_labels
UpperCAmelCase = MPNetForTokenClassification(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self :Any ) -> Dict:
UpperCAmelCase = self.prepare_config_and_inputs()
((UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase) , (UpperCAmelCase)) = config_and_inputs
UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{
"""feature-extraction""": MPNetModel,
"""fill-mask""": MPNetForMaskedLM,
"""question-answering""": MPNetForQuestionAnswering,
"""text-classification""": MPNetForSequenceClassification,
"""token-classification""": MPNetForTokenClassification,
"""zero-shot""": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase = False
__UpperCamelCase = True
    def setUp(self):
        self.model_tester = MPNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37)
def UpperCAmelCase__ ( self :Optional[int] ) -> Dict:
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self :int ) -> Optional[int]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*lowercase_ )
def UpperCAmelCase__ ( self :Optional[Any] ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*lowercase_ )
def UpperCAmelCase__ ( self :Any ) -> int:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*lowercase_ )
def UpperCAmelCase__ ( self :List[Any] ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*lowercase_ )
def UpperCAmelCase__ ( self :Dict ) -> List[str]:
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*lowercase_ )
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self :Dict ) -> List[str]:
UpperCAmelCase = MPNetModel.from_pretrained('microsoft/mpnet-base' )
UpperCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
UpperCAmelCase = model(lowercase_ )[0]
UpperCAmelCase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , lowercase_ )
UpperCAmelCase = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase_ , atol=1E-4 ) )
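
# Minimal feature-extraction sketch mirroring the integration test above
# (real checkpoint id; downloads weights on first use):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base")
    model = MPNetModel.from_pretrained("microsoft/mpnet-base")
    encoded = tokenizer("MPNet combines masked and permuted pre-training.", return_tensors="pt")
    with torch.no_grad():
        hidden = model(**encoded).last_hidden_state
    print(hidden.shape)  # torch.Size([1, seq_len, 768])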
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ,unittest.TestCase ):
_UpperCAmelCase : Optional[Any] = MBartTokenizer
_UpperCAmelCase : List[Any] = MBartTokenizerFast
_UpperCAmelCase : Optional[Any] = True
_UpperCAmelCase : Optional[int] = True
def __lowerCamelCase ( self : Union[str, Any] ) ->Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase__ : Optional[Any] = MBartTokenizer(A , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self : Tuple ) ->List[str]:
lowerCamelCase__ : str = MBartTokenizer(A , keep_accents=A )
lowerCamelCase__ : Optional[int] = tokenizer.tokenize('''This is a test''' )
self.assertListEqual(A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
lowerCamelCase__ : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''9''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''é''',
'''.''',
] , )
lowerCamelCase__ : Optional[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
lowerCamelCase__ : List[str] = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [
SPIECE_UNDERLINE + '''I''',
SPIECE_UNDERLINE + '''was''',
SPIECE_UNDERLINE + '''b''',
'''or''',
'''n''',
SPIECE_UNDERLINE + '''in''',
SPIECE_UNDERLINE + '''''',
'''<unk>''',
'''2''',
'''0''',
'''0''',
'''0''',
''',''',
SPIECE_UNDERLINE + '''and''',
SPIECE_UNDERLINE + '''this''',
SPIECE_UNDERLINE + '''is''',
SPIECE_UNDERLINE + '''f''',
'''al''',
'''s''',
'''<unk>''',
'''.''',
] , )
def __lowerCamelCase ( self : List[Any] ) ->List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase__ : Any = (self.rust_tokenizer_class, '''hf-internal-testing/tiny-random-mbart''', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCamelCase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(A , **A )
lowerCamelCase__ : Any = self.tokenizer_class.from_pretrained(A , **A )
lowerCamelCase__ : List[str] = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(A )
lowerCamelCase__ : List[Any] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
lowerCamelCase__ : Any = tuple(f for f in tokenizer_r_files if '''tokenizer.json''' not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Optional[int] = tokenizer_r.from_pretrained(A )
lowerCamelCase__ : Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
lowerCamelCase__ : List[Any] = tempfile.mkdtemp()
lowerCamelCase__ : Union[str, Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
lowerCamelCase__ : str = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Optional[Any] = tokenizer_r.from_pretrained(A )
lowerCamelCase__ : List[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
lowerCamelCase__ : Optional[Any] = tempfile.mkdtemp()
lowerCamelCase__ : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A )
lowerCamelCase__ : List[str] = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any('''tokenizer.json''' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase__ : Any = tokenizer_r.from_pretrained(A )
lowerCamelCase__ : Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
def __lowerCamelCase ( self : int ) ->Optional[Any]:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ar_AR'''] , 2_5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''en_EN'''] , 2_5_0_0_0_4 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['''ro_RO'''] , 2_5_0_0_2_0 )
def __lowerCamelCase ( self : str ) ->Any:
lowerCamelCase__ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def __lowerCamelCase ( self : Tuple ) ->Tuple:
self.assertIn(A , self.tokenizer.all_special_ids )
lowerCamelCase__ : List[str] = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
lowerCamelCase__ : str = self.tokenizer.decode(A , skip_special_tokens=A )
lowerCamelCase__ : str = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def __lowerCamelCase ( self : Optional[Any] ) ->int:
lowerCamelCase__ : List[str] = ['''this is gunna be a long sentence ''' * 2_0]
assert isinstance(src_text[0] , A )
lowerCamelCase__ : str = 1_0
lowerCamelCase__ : Dict = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , A )
self.assertEqual(len(A ) , A )
def __lowerCamelCase ( self : List[str] ) ->str:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['''<mask>''', '''ar_AR'''] ) , [2_5_0_0_2_6, 2_5_0_0_0_1] )
def __lowerCamelCase ( self : List[Any] ) ->List[Any]:
lowerCamelCase__ : List[str] = tempfile.mkdtemp()
lowerCamelCase__ : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
lowerCamelCase__ : List[Any] = MBartTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def __lowerCamelCase ( self : Union[str, Any] ) ->Any:
lowerCamelCase__ : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors='''pt''' )
lowerCamelCase__ : str = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
@require_torch
def __lowerCamelCase ( self : Any ) ->List[str]:
lowerCamelCase__ : str = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors='''pt''' , )
lowerCamelCase__ : Optional[Any] = shift_tokens_right(batch['''labels'''] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 1_4) , batch.input_ids.shape )
self.assertEqual((2, 1_4) , batch.attention_mask.shape )
lowerCamelCase__ : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] )
def __lowerCamelCase ( self : Any ) ->List[str]:
lowerCamelCase__ : str = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors='''pt''' )
lowerCamelCase__ : Any = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=1_0 , return_tensors='''pt''' )
lowerCamelCase__ : str = targets['''input_ids''']
lowerCamelCase__ : int = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def __lowerCamelCase ( self : Optional[Any] ) ->Optional[Any]:
lowerCamelCase__ : Dict = self.tokenizer._build_translation_inputs(
'''A test''' , return_tensors='''pt''' , src_lang='''en_XX''' , tgt_lang='''ar_AR''' )
self.assertEqual(
nested_simplify(A ) , {
# A, test, EOS, en_XX
'''input_ids''': [[6_2, 3_0_3_4, 2, 2_5_0_0_0_4]],
'''attention_mask''': [[1, 1, 1, 1]],
# ar_AR
'''forced_bos_token_id''': 2_5_0_0_0_1,
} , )
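
# Toy illustration (not the transformers implementation) of the wrap-around shift
# exercised above: MBart moves the final language-code token to position 0 and
# shifts everything else right by one to build decoder_input_ids.
if __name__ == "__main__":
    import torch

    def toy_shift_tokens_right(labels, pad_token_id):
        shifted = labels.clone()
        index_of_eos = (labels != pad_token_id).sum(dim=1) - 1  # last non-pad position
        lang_codes = labels.gather(1, index_of_eos.unsqueeze(1)).squeeze(1)
        shifted[:, 1:] = labels[:, :-1].clone()
        shifted[:, 0] = lang_codes
        return shifted

    labels = torch.tensor([[62, 3034, 2, 250020, 1, 1]])  # tokens, EOS, ro_RO, pads
    print(toy_shift_tokens_right(labels, pad_token_id=1))
    # tensor([[250020, 62, 3034, 2, 250020, 1]])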
def combination_util(arr, n, r, index, data, i):
    '''simple docstring'''
    if index == r:
        for j in range(r):
            print(data[j], end=" ")
        print(" ")
        return
    # When no more elements are there to put in data[]
    if i >= n:
        return
    # current is included, put next at next location
    data[index] = arr[i]
    combination_util(arr, n, r, index + 1, data, i + 1)
    # current is excluded, replace it with
    # next (Note that i+1 is passed, but
    # index is not changed)
    combination_util(arr, n, r, index, data, i + 1)


# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combination_util()
def print_combination(arr, n, r):
    '''simple docstring'''
    # A temporary array to store all combinations one by one
    data = [0] * r
    # Print all combinations using the temporary array 'data[]'
    combination_util(arr, n, r, 0, data, 0)


if __name__ == "__main__":
    # Driver code to check the function above
    arr = [10, 20, 30, 40, 50]
    print_combination(arr, len(arr), 3)
    # This code is contributed by Ambuj sahu
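
# Cross-check (not in the original file): the recursion above enumerates the same
# r-subsets, in the same lexicographic order, as itertools.combinations.
if __name__ == "__main__":
    from itertools import combinations

    for combo in combinations([10, 20, 30, 40, 50], 3):
        print(*combo)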
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"text_branch": "text_model",
"audio_branch": "audio_model.audio_encoder",
"attn": "attention.self",
"self.proj": "output.dense",
"attention.self_mask": "attn_mask",
"mlp.fc1": "intermediate.dense",
"mlp.fc2": "output.dense",
"norm1": "layernorm_before",
"norm2": "layernorm_after",
"bn0": "batch_norm",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")
def init_clap(checkpoint_path, enable_fusion=False):
    '''simple docstring'''
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    '''simple docstring'''
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
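
# Worked example of the sequential-index arithmetic above, on a made-up key:
# the mapping sends nn.Sequential index i into `layers.{i // 3}.linear.`.
def _rename_demo():
    """
    >>> _rename_demo()
    'text_branch.projection.layers.1.linear.weight'
    """
    key = "text_branch.projection.sequential.3.weight"
    index = int(re.match(r".*sequential.(\d+).*", key).group(1))
    return key.replace(f"sequential.{index}.", f"layers.{index // 3}.linear.")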
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    '''simple docstring'''
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument("--enable_fusion", action="store_true", help="Whether to enable fusion or not")
    args = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=DummyObject ):
    """simple docstring"""

    # Standard dummy-object pattern: the real class name is obfuscated in this dump;
    # the method names below follow the usual diffusers dummy-object template.
    _backends = ["transformers", "torch", "note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["transformers", "torch", "note_seq"])
"""simple docstring"""
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation by squaring: base**exponent % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation tower
    base^(base^(...)) of the given height (Project Euler 188)."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
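
# Cross-check (not in the original file): Python's built-in three-argument pow()
# is modular exponentiation, so the recursive helper can be swapped out for it.
def solution_with_pow(base: int = 1_777, height: int = 1_855, digits: int = 8) -> int:
    result = base
    for _ in range(1, height):
        result = pow(base, result, 10**digits)
    return result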
"""simple docstring"""
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCamelCase_ (ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images: ImageInput = None, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_token_type_ids: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")
        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding
        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None
        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)
        return encoding_image_processor
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
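
# A minimal usage sketch (the checkpoint id is illustrative; downloads weights):
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
    dummy = Image.new("RGB", (224, 224))
    inputs = processor(images=dummy, text="a photo of", return_tensors="pt")
    print(sorted(inputs.keys()))  # pixel_values plus input_ids / attention_mask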
"""simple docstring"""
import re
import string
import numpy as np
import datasets
_DESCRIPTION = '''
Returns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: List of predicted texts.
references: List of reference texts.
regexes_to_ignore: List, defaults to None. Regex expressions of characters to
ignore when calculating the exact matches. Note: these regexes are removed
from the input data before the changes based on the options below (e.g. ignore_case,
ignore_punctuation, ignore_numbers) are applied.
ignore_case: Boolean, defaults to False. If true, turns everything
to lowercase so that capitalization differences are ignored.
ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before
comparing predictions and references.
    ignore_numbers: Boolean, defaults to False. If true, removes all digits before
        comparing predictions and references.
Returns:
exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.
Examples:
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
25.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
50.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)
>>> print(round(results["exact_match"], 1))
75.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["the cat", "theater", "YELLING", "agent007"]
>>> preds = ["cat?", "theater", "yelling", "agent"]
>>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)
>>> print(round(results["exact_match"], 1))
100.0
>>> exact_match = datasets.load_metric("exact_match")
>>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]
>>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]
>>> results = exact_match.compute(references=refs, predictions=preds)
>>> print(round(results["exact_match"], 1))
33.3
'''
_CITATION = '''
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ExactMatch(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , reference_urls=[] , )
    def _compute(self, predictions, references, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False):
        if regexes_to_ignore is not None:
            for s in regexes_to_ignore:
                predictions = np.array([re.sub(s, "", x) for x in predictions])
                references = np.array([re.sub(s, "", x) for x in references])
        else:
            predictions = np.asarray(predictions)
            references = np.asarray(references)

        if ignore_case:
            predictions = np.char.lower(predictions)
            references = np.char.lower(references)

        if ignore_punctuation:
            repl_table = string.punctuation.maketrans("", "", string.punctuation)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        if ignore_numbers:
            repl_table = string.digits.maketrans("", "", string.digits)
            predictions = np.char.translate(predictions, table=repl_table)
            references = np.char.translate(references, table=repl_table)

        score_list = predictions == references

        return {"exact_match": np.mean(score_list) * 100}
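# A dependency-light sketch of the same normalization pipeline for a single
# prediction/reference pair, mirroring the order used in _compute above (regex
# removal first, then case folding, punctuation and digit stripping).
# `single_exact_match` is a hypothetical helper, not part of the datasets API.
def single_exact_match(
    prediction, reference, regexes_to_ignore=None, ignore_case=False, ignore_punctuation=False, ignore_numbers=False
):
    for s in regexes_to_ignore or []:
        prediction = re.sub(s, "", prediction)
        reference = re.sub(s, "", reference)
    if ignore_case:
        prediction, reference = prediction.lower(), reference.lower()
    if ignore_punctuation:
        table = str.maketrans("", "", string.punctuation)
        prediction, reference = prediction.translate(table), reference.translate(table)
    if ignore_numbers:
        table = str.maketrans("", "", string.digits)
        prediction, reference = prediction.translate(table), reference.translate(table)
    return float(prediction == reference)  # 1.0 on an exact match, else 0.0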
from __future__ import annotations
import copy
import tempfile
import unittest
from transformers import CONFIG_MAPPING, AutoConfig, BertConfig, GPT2Config, T5Config, TapasConfig, is_tf_available
from transformers.testing_utils import (
DUMMY_UNKNOWN_IDENTIFIER,
SMALL_MODEL_IDENTIFIER,
RequestCounter,
require_tensorflow_probability,
require_tf,
slow,
)
from ..bert.test_modeling_bert import BertModelTester
if is_tf_available():
from transformers import (
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
        TFAutoModelForSeq2SeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelForTableQuestionAnswering,
TFAutoModelForTokenClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFFunnelBaseModel,
TFFunnelModel,
        TFGPT2LMHeadModel,
TFRobertaForMaskedLM,
        TFT5ForConditionalGeneration,
TFTapasForQuestionAnswering,
)
from transformers.models.auto.modeling_tf_auto import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TF_MODEL_MAPPING,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.gpt2.modeling_tf_gpt2 import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
    from transformers.models.t5.modeling_tf_t5 import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.tapas.modeling_tf_tapas import TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST
class NewModelConfig(BertConfig):
    model_type = "new-model"


if is_tf_available():

    class TFNewModel(TFBertModel):
        config_class = NewModelConfig
@require_tf
class TFAutoModelTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_model_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModel.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertModel)

    @slow
    def test_model_for_pretraining_from_pretrained(self):
        model_name = "bert-base-cased"
        config = AutoConfig.from_pretrained(model_name)
        self.assertIsNotNone(config)
        self.assertIsInstance(config, BertConfig)
        model = TFAutoModelForPreTraining.from_pretrained(model_name)
        self.assertIsNotNone(model)
        self.assertIsInstance(model, TFBertForPreTraining)

    @slow
    def test_model_for_causal_lm(self):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, GPT2Config)
            model = TFAutoModelForCausalLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFGPT2LMHeadModel)

    @slow
    def test_lmhead_model_from_pretrained(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelWithLMHead.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_masked_lm(self):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForMaskedLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForMaskedLM)

    @slow
    def test_model_for_encoder_decoder_lm(self):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, T5Config)
            model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)
            model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFT5ForConditionalGeneration)

    @slow
    def test_sequence_classification_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForSequenceClassification)

    @slow
    def test_question_answering_model_from_pretrained(self):
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFBertForQuestionAnswering)

    @slow
    @require_tensorflow_probability
    def test_table_question_answering_model_from_pretrained(self):
        for model_name in TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, TapasConfig)
            model = TFAutoModelForTableQuestionAnswering.from_pretrained(model_name)
            model, loading_info = TFAutoModelForTableQuestionAnswering.from_pretrained(
                model_name, output_loading_info=True
            )
            self.assertIsNotNone(model)
            self.assertIsInstance(model, TFTapasForQuestionAnswering)
    def test_from_pretrained_identifier(self):
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, TFBertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(model, TFRobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_pretrained_with_tuple_values(self):
        # For the Funnel config, the auto mapping holds two model classes; the default is TFFunnelModel
        model = TFAutoModel.from_pretrained("sgugger/funnel-random-tiny")
        self.assertIsInstance(model, TFFunnelModel)
        config = copy.deepcopy(model.config)
        config.architectures = ["FunnelBaseModel"]
        model = TFAutoModel.from_config(config)
        self.assertIsInstance(model, TFFunnelBaseModel)
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir)
            model = TFAutoModel.from_pretrained(tmp_dir)
            self.assertIsInstance(model, TFFunnelBaseModel)
    def test_new_model_registration(self):
        try:
            AutoConfig.register("new-model", NewModelConfig)

            auto_classes = [
                TFAutoModel,
                TFAutoModelForCausalLM,
                TFAutoModelForMaskedLM,
                TFAutoModelForPreTraining,
                TFAutoModelForQuestionAnswering,
                TFAutoModelForSequenceClassification,
                TFAutoModelForTokenClassification,
            ]

            for auto_class in auto_classes:
                with self.subTest(auto_class.__name__):
                    # Wrong config class will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFNewModel)
                    auto_class.register(NewModelConfig, TFNewModel)
                    # Trying to register something existing in the Transformers library will raise an error
                    with self.assertRaises(ValueError):
                        auto_class.register(BertConfig, TFBertModel)

                    # Now that the config is registered, it can be used as any other config with the auto-API
                    tiny_config = BertModelTester(self).get_config()
                    config = NewModelConfig(**tiny_config.to_dict())
                    model = auto_class.from_config(config)
                    self.assertIsInstance(model, TFNewModel)

                    with tempfile.TemporaryDirectory() as tmp_dir:
                        model.save_pretrained(tmp_dir)
                        new_model = auto_class.from_pretrained(tmp_dir)
                        self.assertIsInstance(new_model, TFNewModel)
        finally:
            if "new-model" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["new-model"]
            for mapping in (
                TF_MODEL_MAPPING,
                TF_MODEL_FOR_PRETRAINING_MAPPING,
                TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
                TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
                TF_MODEL_FOR_CAUSAL_LM_MAPPING,
                TF_MODEL_FOR_MASKED_LM_MAPPING,
            ):
                if NewModelConfig in mapping._extra_content:
                    del mapping._extra_content[NewModelConfig]
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = TFAutoModel.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = TFAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_model_file_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named pytorch_model.bin",
        ):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/config-no-model")

    def test_model_from_pt_suggestion(self):
        with self.assertRaisesRegex(EnvironmentError, "Use `from_pt=True` to load this model"):
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")

    def test_cached_model_has_minimum_calls_to_head(self):
        # Make sure we have cached the model.
        _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
        # With a sharded checkpoint
        _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
        with RequestCounter() as counter:
            _ = TFAutoModel.from_pretrained("ArthurZ/tiny-random-bert-sharded")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
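# Condensed sketch of the registration pattern exercised by test_new_model_registration
# above: pair a custom config with a custom TF model so the Auto classes can resolve it.
# The names MyConfig / TFMyModel / "my-model" are illustrative only.
#
#   class MyConfig(BertConfig):
#       model_type = "my-model"
#
#   class TFMyModel(TFBertModel):
#       config_class = MyConfig
#
#   AutoConfig.register("my-model", MyConfig)
#   TFAutoModel.register(MyConfig, TFMyModel)
#   model = TFAutoModel.from_config(MyConfig())  # resolves to TFMyModel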
from math import pi


def arc_length(angle: int, radius: int) -> float:
    """Return the length of a circular arc with central angle `angle` (in degrees) and the given radius."""
    return 2 * pi * radius * (angle / 360)


if __name__ == "__main__":
    print(arc_length(90, 10))
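    # Sanity check: the call above covers a 90-degree arc of radius 10, i.e. a
    # quarter circumference, 2 * pi * 10 / 4 = 5 * pi ~= 15.71. A full 360-degree
    # "arc" recovers the whole circumference:
    print(arc_length(360, 10))  # ~62.83, i.e. 2 * pi * 10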
import numpy as np


def runge_kutta(f, y0, x0, x_end, h):
    """Approximate the solution of y' = f(x, y) with the classic fourth-order Runge-Kutta method."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y


if __name__ == "__main__":
    import doctest

    doctest.testmod()
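    # Worked example (illustrative): integrate y' = y with y(0) = 1 on [0, 1].
    # The exact solution is e**x, so the last value should approximate e.
    approx = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
    print(approx[-1])  # ~2.71828, global error is O(h**4)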
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
_DESCRIPTION = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only works with the CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More information on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
_KWARGS_DESCRIPTION = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}

    doc_coref_infos = {}

    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0

    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num

    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)

    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num

    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span)

    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters

        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters

    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)

    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)

    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}"
        )
        logger.info(
            "Number of resulting singleton clusters in the key "
            f"annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}"
        )

    if not keep_singletons:
        logger.info(
            f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system "
            "files, respectively"
        )

    return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)

    output_scores = {}
    conll = 0
    conll_subparts_num = 0

    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})

        logger.info(
            name.ljust(10),
            f"Recall: {recall * 100:.2f}",
            f" Precision: {precision * 100:.2f}",
            f" F1: {f1 * 100:.2f}",
        )

    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})

    return output_scores
def check_gold_parse_annotation(key_lines):
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Coval(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string")),
                    "references": datasets.Sequence(datasets.Value("string")),
                }
            ),
            codebase_urls=["https://github.com/ns-moosavi/coval"],
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ],
        )

    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]

        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"

        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )

        return score
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
MODEL = "base_with_context"
def load_notes_encoder(weights, model):
"""simple docstring"""
lowercase__ : Tuple = nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowercase__ : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_snake_case )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase__ : List[str] = weights[F"""layers_{lyr_num}"""]
lowercase__ : Optional[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowercase__ : Union[str, Any] = ly_weight["attention"]
lowercase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase__ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder(weights, model):
"""simple docstring"""
lowercase__ : Tuple = nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowercase__ : Union[str, Any] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_snake_case )
for lyr_num, lyr in enumerate(model.encoders ):
lowercase__ : str = weights[F"""layers_{lyr_num}"""]
lowercase__ : Optional[int] = ly_weight["attention"]
lowercase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase__ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase__ : int = nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowercase__ : Dict = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase__ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase__ : str = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase__ : List[str] = nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder(weights, model):
"""simple docstring"""
lowercase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowercase__ : int = nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowercase__ : List[str] = nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=_snake_case )
lowercase__ : Optional[int] = nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowercase__ : str = weights[F"""layers_{lyr_num}"""]
lowercase__ : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowercase__ : List[Any] = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowercase__ : Dict = ly_weight["self_attention"]
lowercase__ : int = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase__ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase__ : Tuple = ly_weight["MultiHeadDotProductAttention_0"]
lowercase__ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowercase__ : Dict = nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowercase__ : Any = nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowercase__ : str = nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowercase__ : Tuple = nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowercase__ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowercase__ : Dict = nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowercase__ : Optional[int] = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowercase__ : int = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowercase__ : Any = nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowercase__ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowercase__ : Any = nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main(args):
    """Convert a t5x Music Spectrogram Diffusion checkpoint into a diffusers SpectrogramDiffusionPipeline."""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path)
    ta_checkpoint = jnp.tree_util.tree_map(onp.array, ta_checkpoint)
    gin_overrides = [
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
    gin_file = os.path.join(args.checkpoint_path, "..", "config.gin")
    gin_config = inference.parse_training_gin_file(gin_file, gin_overrides)
    synth_model = inference.InferenceModel(args.checkpoint_path, gin_config)
    scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", variance_type="fixed_large")

    notes_encoder = SpectrogramNotesEncoder(
        max_length=synth_model.sequence_length["inputs"], vocab_size=synth_model.model.module.config.vocab_size, d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    continuous_encoder = SpectrogramContEncoder(
        input_dims=synth_model.audio_codec.n_dims, targets_context_length=synth_model.sequence_length["targets_context"], d_model=synth_model.model.module.config.emb_dim, dropout_rate=synth_model.model.module.config.dropout_rate, num_layers=synth_model.model.module.config.num_encoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, feed_forward_proj="gated-gelu",
    )
    decoder = T5FilmDecoder(
        input_dims=synth_model.audio_codec.n_dims, targets_length=synth_model.sequence_length["targets_context"], max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time, d_model=synth_model.model.module.config.emb_dim, num_layers=synth_model.model.module.config.num_decoder_layers, num_heads=synth_model.model.module.config.num_heads, d_kv=synth_model.model.module.config.head_dim, d_ff=synth_model.model.module.config.mlp_dim, dropout_rate=synth_model.model.module.config.dropout_rate,
    )

    notes_encoder = load_notes_encoder(ta_checkpoint["target"]["token_encoder"], notes_encoder)
    continuous_encoder = load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"], continuous_encoder)
    decoder = load_decoder(ta_checkpoint["target"]["decoder"], decoder)

    melgan = OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder")

    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan,
    )
    if args.save:
        pipe.save_pretrained(args.output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''')
parser.add_argument(
'''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.'''
)
parser.add_argument(
'''--checkpoint_path''',
default=f'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help='''Path to the original jax model checkpoint.''',
)
    args = parser.parse_args()
main(args)
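# Example invocation, assuming this file is saved as
# convert_music_spectrogram_to_diffusers.py (paths are placeholders):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./converted_spectrogram_diffusion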
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name: str, num_meta4D_last_stage: int) -> str:
    new_name = old_name

    if "patch_embed" in old_name:
        _, layer, param = old_name.split(".")

        if layer == "0":
            new_name = old_name.replace("0", "convolution1")
        elif layer == "1":
            new_name = old_name.replace("1", "batchnorm_before")
        elif layer == "3":
            new_name = old_name.replace("3", "convolution2")
        else:
            new_name = old_name.replace("4", "batchnorm_after")

    if "network" in old_name and re.search(r"\d\.\d", old_name):
        two_digit_num = r"\b\d{2}\b"
        if bool(re.search(two_digit_num, old_name)):
            match = re.search(r"\d\.\d\d.", old_name).group()
        else:
            match = re.search(r"\d\.\d.", old_name).group()
        if int(match[0]) < 6:
            trimmed_name = old_name.replace(match, "")
            trimmed_name = trimmed_name.replace("network", match[0] + ".meta4D_layers.blocks." + match[2:-1])
            new_name = "intermediate_stages." + trimmed_name
        else:
            trimmed_name = old_name.replace(match, "")
            if int(match[2]) < num_meta4D_last_stage:
                trimmed_name = trimmed_name.replace("network", "meta4D_layers.blocks." + match[2])
            else:
                layer_index = str(int(match[2]) - num_meta4D_last_stage)
                trimmed_name = trimmed_name.replace("network", "meta3D_layers.blocks." + layer_index)
                if "norm1" in old_name:
                    trimmed_name = trimmed_name.replace("norm1", "layernorm1")
                elif "norm2" in old_name:
                    trimmed_name = trimmed_name.replace("norm2", "layernorm2")
                elif "fc1" in old_name:
                    trimmed_name = trimmed_name.replace("fc1", "linear_in")
                elif "fc2" in old_name:
                    trimmed_name = trimmed_name.replace("fc2", "linear_out")
            new_name = "last_stage." + trimmed_name

    elif "network" in old_name and re.search(r".\d.", old_name):
        new_name = old_name.replace("network", "intermediate_stages")

    if "fc" in new_name:
        new_name = new_name.replace("fc", "convolution")
    elif ("norm1" in new_name) and ("layernorm1" not in new_name):
        new_name = new_name.replace("norm1", "batchnorm_before")
    elif ("norm2" in new_name) and ("layernorm2" not in new_name):
        new_name = new_name.replace("norm2", "batchnorm_after")
    if "proj" in new_name:
        new_name = new_name.replace("proj", "projection")
    if "dist_head" in new_name:
        new_name = new_name.replace("dist_head", "distillation_classifier")
    elif "head" in new_name:
        new_name = new_name.replace("head", "classifier")
    elif "patch_embed" in new_name:
        new_name = "efficientformer." + new_name
    elif new_name == "norm.weight" or new_name == "norm.bias":
        new_name = new_name.replace("norm", "layernorm")
        new_name = "efficientformer." + new_name
    else:
        new_name = "efficientformer.encoder." + new_name

    return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(
    checkpoint_path: Path, efficientformer_config_file: Path, pytorch_dump_path: Path, push_to_hub: bool
):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    config = EfficientFormerConfig.from_json_file(efficientformer_config_file)
    model = EfficientFormerForImageClassificationWithTeacher(config)
    model_name = "_".join(checkpoint_path.split("/")[-1].split(".")[0].split("_")[:-1])

    num_meta4D_last_stage = config.depths[-1] - config.num_meta3d_blocks + 1
    new_state_dict = convert_torch_checkpoint(orig_state_dict, num_meta4D_last_stage)

    model.load_state_dict(new_state_dict)
    model.eval()

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    # prepare image
    image = prepare_img()
    image_size = 256
    crop_size = 224
    processor = EfficientFormerImageProcessor(
        size={"shortest_edge": image_size},
        crop_size={"height": crop_size, "width": crop_size},
        resample=pillow_resamplings["bicubic"],
    )
    pixel_values = processor(images=image, return_tensors="pt").pixel_values

    # original processing pipeline
    image_transforms = Compose(
        [
            Resize(image_size, interpolation=pillow_resamplings["bicubic"]),
            CenterCrop(crop_size),
            ToTensor(),
            Normalize(IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD),
        ]
    )
    original_pixel_values = image_transforms(image).unsqueeze(0)

    assert torch.allclose(original_pixel_values, pixel_values)

    outputs = model(pixel_values)
    logits = outputs.logits

    expected_shape = (1, 1000)
    if "l1" in model_name:
        expected_logits = torch.Tensor(
            [-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l3" in model_name:
        expected_logits = torch.Tensor(
            [-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127]
        )
        assert torch.allclose(logits[0, :10], expected_logits, atol=1e-3)
        assert logits.shape == expected_shape
    elif "l7" in model_name:
        expected_logits = torch.Tensor(
            [-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878]
        )
        assert logits.shape == expected_shape
    else:
        raise ValueError(
            f"Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7"
        )

    # Save Checkpoints
    Path(pytorch_dump_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_path)
    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")
    processor.save_pretrained(pytorch_dump_path)
    print(f"Processor successfully saved at {pytorch_dump_path}")

    if push_to_hub:
        print("Pushing model to the hub...")
        model.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add model", use_temp_dir=True
        )
        processor.push_to_hub(
            repo_id=f"Bearnardd/{pytorch_dump_path}", commit_message="Add image processor", use_temp_dir=True
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
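# Example invocation, assuming this file is saved as
# convert_efficientformer_checkpoint.py (paths are placeholders):
#
#   python convert_efficientformer_checkpoint.py \
#       --pytorch_model_path efficientformer_l1_300d.pth \
#       --config_file efficientformer_l1_config.json \
#       --pytorch_dump_path ./efficientformer-l1 \
#       --no-push_to_hub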
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
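# Example invocation, assuming this script is saved as binarized_data.py
# (the input file path is a placeholder):
#
#   python binarized_data.py \
#       --file_path data/dump.txt \
#       --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased \
#       --dump_file data/binarized_text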
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step methods."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic scheduler for variance-expanding models (Algorithms 1 and 2 of Karras et al., 2022)."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ):
        # Raise the noise level of `sample` to sigma_hat when sigma lies inside [s_min, s_max].
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # First-order (Euler) step from sigma_hat to sigma_prev.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ):
        # Second-order correction averaging the two slope estimates.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
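# A rough sketch of the sampling loop this scheduler supports (the stochastic
# sampler of Karras et al., 2022). `unet` is an assumed noise-predicting model;
# the exact input scaling and the second-order correction of the full pipeline
# are omitted for brevity — see diffusers' KarrasVePipeline for the real loop.
#
#   scheduler = KarrasVeScheduler()
#   scheduler.set_timesteps(50)
#   sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#   for i in range(len(scheduler.schedule)):
#       sigma = scheduler.schedule[i]
#       sigma_prev = scheduler.schedule[i + 1] if i + 1 < len(scheduler.schedule) else 0.0
#       sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#       model_output = unet(sample_hat, sigma_hat)  # assumed model call
#       sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample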
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():

    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))

    @slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs

            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
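# A short usage sketch of the in-graph tokenizer under test: because
# TFBertTokenizer runs inside the TF graph, tokenization can be compiled and
# exported together with the model (the checkpoint name is illustrative).
#
#   tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#   model = TFAutoModel.from_pretrained("bert-base-uncased")
#   tokenized = tf_tokenizer(tf.constant(["hello world"]))
#   outputs = model(**tokenized)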
| 85
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(
        self,
        parent,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)

        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)

        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict


@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizers
            return True

        return False

    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
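
# Minimal usage sketch (illustrative): the same feature-extraction flow as the
# integration test above, runnable on its own once the "jplu/tf-flaubert-small-cased"
# checkpoint has been downloaded.
if __name__ == "__main__":
    model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
    input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
    hidden_states = model(input_ids)[0]
    print(hidden_states.shape)  # (1, 8, 512)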
| 20
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
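
# Minimal usage sketch (illustrative): preprocessing a single image with an
# explicitly configured DPTImageProcessor, so no checkpoint download is needed.
# The 384x384 target size is just an assumption for the example.
if __name__ == "__main__":
    processor = DPTImageProcessor(size={"height": 384, "width": 384})
    dummy = Image.new("RGB", (640, 480))
    pixel_values = processor(dummy, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 384, 384])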
| 365
|
"""simple docstring"""
import colorsys

from PIL import Image  # type: ignore


def get_distance(x: float, y: float, max_step: int) -> float:
    """
    Return the relative distance (= step / max_step) after which the complex
    number made up of this x-y pair diverges. Members of the Mandelbrot set
    do not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
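
    # Quick sanity checks (illustrative addition, not from the original file):
    # the origin never diverges, so its relative distance is 1.0, while a point
    # far outside the set escapes on the very first step and returns 0.0.
    assert get_distance(0, 0, 50) == 1.0
    assert get_distance(3, 3, 50) == 0.0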
| 239
| 0
|
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which
    defines the cumulative product of (1-beta) over time from t = [0,1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,  # sensible defaults
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(
        self,
        sample: torch.FloatTensor,
        timestep: Union[float, torch.FloatTensor],
    ) -> torch.FloatTensor:
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas, num_inference_steps):
        """Constructs the noise schedule of Karras et al. (2022)."""

        sigma_min: float = in_sigmas[-1].item()
        sigma_max: float = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
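
# Minimal usage sketch (illustrative; this module uses relative imports, so use
# the public API instead of running it directly):
#
#   from diffusers import HeunDiscreteScheduler
#
#   scheduler = HeunDiscreteScheduler(beta_schedule="scaled_linear")
#   scheduler.set_timesteps(num_inference_steps=25)
#   # every interior step is duplicated for the 2nd-order (Heun) correction pass,
#   # so 25 inference steps expand to 2 * 25 - 1 = 49 scheduler timesteps
#   assert len(scheduler.timesteps) == 49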
| 253
|
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}
def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict
def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer.
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--repo_id', default=None, type=str, required=True, help='Repo ID from which to pull the checkpoint.'
)
parser.add_argument(
'--checkpoint_file', default=None, type=str, required=True, help='Name of the checkpoint file in the repo.'
)
parser.add_argument(
'--output_dir', default=None, type=str, required=True, help='Where to save the converted model.'
)
parser.add_argument(
'--tokenizer_file',
default=None,
type=str,
help='Path to the tokenizer file to use (if not provided, only the model is converted).',
)
parser.add_argument(
'--size',
default=None,
type=str,
help='Size of the model. Will be inferred from the `checkpoint_file` if not passed.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Push to the Hub the converted model.',
)
parser.add_argument(
'--model_name',
default=None,
type=str,
help='Name of the pushed model on the Hub, including the username / organization.',
)
    args = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
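
# Example invocation (illustrative only; the repo and file names below are
# placeholders, not verified checkpoints):
#
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id <hub-repo-with-rwkv-weights> \
#       --checkpoint_file <checkpoint>.pth \
#       --output_dir ./rwkv-hf \
#       --size 169M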
| 253
| 1
|
import os


def solution(filename: str = "matrix.txt") -> int:
    """
    Find the minimal path sum from the top-left to the bottom-right corner of the
    matrix stored in `filename`, moving only right and down (Project Euler 81).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]

    dp[0][0] = grid[0][0]
    # cells in the first row and first column can only be reached one way
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]
    # every other cell is entered either from above or from the left
    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
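
    # Worked example of the same DP on a tiny in-memory grid (illustrative,
    # not part of the original solution): dp[i][j] holds the cheapest cost of
    # reaching cell (i, j) when moving only right or down.
    grid = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    dp = [row[:] for row in grid]
    for k in range(1, 3):
        dp[0][k] += dp[0][k - 1]
        dp[k][0] += dp[k - 1][0]
    for i in range(1, 3):
        for j in range(1, 3):
            dp[i][j] += min(dp[i - 1][j], dp[i][j - 1])
    assert dp[-1][-1] == 21  # path 1 -> 2 -> 3 -> 6 -> 9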
| 225
|
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])

        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )

        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"

        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")

        encoding = tokenizer(sentence, return_tensors="pt")

        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits

        prediction = token_logits[:, 2, :].argmax(-1)[0]

        self.assertEqual(tokenizer.decode(prediction), "capital")
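
# Minimal usage sketch (illustrative): the same mask-filling flow as the
# integration test above, runnable on its own once the checkpoint is cached.
if __name__ == "__main__":
    tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
    model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
    encoding = tokenizer("the [MASK] of Belgium is Brussels", return_tensors="pt")
    with torch.no_grad():
        logits = model(encoding.input_ids).logits
    print(tokenizer.decode(logits[:, 2, :].argmax(-1)[0]))  # "capital"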
| 225
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict


@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
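
# Minimal usage sketch (illustrative): Flax feature extraction with the public
# "albert-base-v2" checkpoint, as in the integration test above.
if __name__ == "__main__":
    model = FlaxAlbertModel.from_pretrained("albert-base-v2")
    input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
    last_hidden_state = model(input_ids)[0]
    print(last_hidden_state.shape)  # (1, 11, 768)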
| 102
|
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
_KWARGS_DESCRIPTION = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.
- **zero_division** (`'warn'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
SCREAMING_SNAKE_CASE : Tuple = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class _UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def SCREAMING_SNAKE_CASE (self , a_ , a_ , a_=None , a_=1 , a_="binary" , a_=None , a_="warn" , ):
'''simple docstring'''
__snake_case : Any = recall_score(
a_ , a_ , labels=a_ , pos_label=a_ , average=a_ , sample_weight=a_ , zero_division=a_ , )
return {"recall": float(a_ ) if score.size == 1 else score}
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _lowerCamelCase ( ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png'
UpperCAmelCase_ : Dict = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('RGB' )
return image
def _lowerCamelCase ( lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) )
rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') )
rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def _lowerCamelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[int] ):
"""simple docstring"""
UpperCAmelCase_ : int = dct.pop(lowerCamelCase_ )
UpperCAmelCase_ : str = val
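# Illustration (hypothetical key): given dct = {'ln_vision.weight': w}, calling the helper
# above with src='ln_vision.weight' and dest='vision_model.post_layernorm.weight' pops the
# tensor and re-inserts it under the HF-style name, leaving {'vision_model.post_layernorm.weight': w}.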
def _lowerCamelCase ( lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
UpperCAmelCase_ : List[str] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' )
UpperCAmelCase_ : Tuple = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' )
# next, set bias in the state dict
UpperCAmelCase_ : Union[str, Any] = torch.cat((q_bias, torch.zeros_like(lowerCamelCase_ , requires_grad=lowerCamelCase_ ), v_bias) )
UpperCAmelCase_ : int = qkv_bias
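# Layout note (an informal sketch): LAVIS stores separate q and v biases and no k bias, so
# for hidden size h the fused bias built above is torch.cat((q_bias, zeros(h), v_bias)),
# matching the single fused qkv projection of the HF vision encoder.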
def _lowerCamelCase ( lowerCamelCase_ : List[Any] , lowerCamelCase_ : Any ):
"""simple docstring"""
UpperCAmelCase_ : List[Any] = 364 if 'coco' in model_name else 224
UpperCAmelCase_ : Tuple = BlipaVisionConfig(image_size=lowerCamelCase_ ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
UpperCAmelCase_ : Union[str, Any] = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=lowerCamelCase_ ).to_dict()
elif "opt-6.7b" in model_name:
UpperCAmelCase_ : List[str] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=lowerCamelCase_ ).to_dict()
elif "t5-xl" in model_name:
UpperCAmelCase_ : str = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
UpperCAmelCase_ : List[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict()
UpperCAmelCase_ : Union[str, Any] = BlipaConfig(vision_config=lowerCamelCase_ , text_config=lowerCamelCase_ )
return config, image_size
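# For example, model_name='blip2-opt-2.7b' pairs the ViT vision tower at image_size 224 with
# the facebook/opt-2.7b text config, while any '-coco' checkpoint switches the vision tower
# to image_size 364 (see the branches above).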
@torch.no_grad()
def _lowerCamelCase ( lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : Optional[int]=None , lowerCamelCase_ : Tuple=False ):
"""simple docstring"""
UpperCAmelCase_ : Any = (
AutoTokenizer.from_pretrained('facebook/opt-2.7b' )
if 'opt' in model_name
else AutoTokenizer.from_pretrained('google/flan-t5-xl' )
)
UpperCAmelCase_ : List[Any] = tokenizer('\n' , add_special_tokens=lowerCamelCase_ ).input_ids[0]
UpperCAmelCase_ , UpperCAmelCase_ : Dict = get_blipa_config(lowerCamelCase_ , eos_token_id=lowerCamelCase_ )
UpperCAmelCase_ : str = BlipaForConditionalGeneration(lowerCamelCase_ ).eval()
UpperCAmelCase_ : Optional[int] = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
UpperCAmelCase_ , UpperCAmelCase_ : int = model_name_to_original[model_name]
# load original model
print('Loading original model...' )
UpperCAmelCase_ : Optional[int] = 'cuda' if torch.cuda.is_available() else 'cpu'
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = load_model_and_preprocess(
name=lowerCamelCase_ , model_type=lowerCamelCase_ , is_eval=lowerCamelCase_ , device=lowerCamelCase_ )
original_model.eval()
print('Done!' )
# update state dict keys
UpperCAmelCase_ : Optional[int] = original_model.state_dict()
UpperCAmelCase_ : Optional[Any] = create_rename_keys(lowerCamelCase_ )
for src, dest in rename_keys:
rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
UpperCAmelCase_ : str = state_dict.pop(lowerCamelCase_ )
if key.startswith('Qformer.bert' ):
UpperCAmelCase_ : Optional[int] = key.replace('Qformer.bert' , 'qformer' )
if "attention.self" in key:
UpperCAmelCase_ : List[Any] = key.replace('self' , 'attention' )
if "opt_proj" in key:
UpperCAmelCase_ : List[str] = key.replace('opt_proj' , 'language_projection' )
if "t5_proj" in key:
UpperCAmelCase_ : List[str] = key.replace('t5_proj' , 'language_projection' )
if key.startswith('opt' ):
UpperCAmelCase_ : Any = key.replace('opt' , 'language' )
if key.startswith('t5' ):
UpperCAmelCase_ : Any = key.replace('t5' , 'language' )
UpperCAmelCase_ : Optional[Any] = val
# read in qv biases
read_in_q_v_bias(lowerCamelCase_ , lowerCamelCase_ )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = hf_model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ )
assert len(lowerCamelCase_ ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
UpperCAmelCase_ : Tuple = load_demo_image()
UpperCAmelCase_ : Tuple = vis_processors['eval'](lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
UpperCAmelCase_ : Tuple = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(lowerCamelCase_ )
# create processor
UpperCAmelCase_ : List[Any] = BlipImageProcessor(
size={'height': image_size, 'width': image_size} , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = BlipaProcessor(image_processor=lowerCamelCase_ , tokenizer=lowerCamelCase_ )
UpperCAmelCase_ : Tuple = processor(images=lowerCamelCase_ , return_tensors='pt' ).pixel_values.to(lowerCamelCase_ )
# make sure processor creates exact same pixel values
assert torch.allclose(lowerCamelCase_ , lowerCamelCase_ )
original_model.to(lowerCamelCase_ )
hf_model.to(lowerCamelCase_ )
with torch.no_grad():
if "opt" in model_name:
UpperCAmelCase_ : Tuple = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits
UpperCAmelCase_ : Any = hf_model(lowerCamelCase_ , lowerCamelCase_ ).logits
else:
UpperCAmelCase_ : List[str] = original_model(
{'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits
UpperCAmelCase_ : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
UpperCAmelCase_ : List[str] = hf_model(lowerCamelCase_ , lowerCamelCase_ , labels=lowerCamelCase_ ).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
UpperCAmelCase_ : int = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=lowerCamelCase_ )
assert torch.allclose(logits[0, :3, :3] , lowerCamelCase_ , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
UpperCAmelCase_ : Any = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=lowerCamelCase_ )
else:
# cast to same type
UpperCAmelCase_ : Dict = logits.dtype
assert torch.allclose(original_logits.to(lowerCamelCase_ ) , lowerCamelCase_ , atol=1e-2 )
print('Looks ok!' )
print('Generating a caption...' )
UpperCAmelCase_ : Optional[Any] = ''
UpperCAmelCase_ : str = tokenizer(lowerCamelCase_ , return_tensors='pt' ).input_ids.to(lowerCamelCase_ )
UpperCAmelCase_ : Union[str, Any] = original_model.generate({'image': original_pixel_values} )
UpperCAmelCase_ : Optional[int] = hf_model.generate(
lowerCamelCase_ , lowerCamelCase_ , do_sample=lowerCamelCase_ , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print('Original generation:' , lowerCamelCase_ )
UpperCAmelCase_ : Tuple = input_ids.shape[1]
UpperCAmelCase_ : Optional[Any] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=lowerCamelCase_ )
UpperCAmelCase_ : Optional[int] = [text.strip() for text in output_text]
print('HF generation:' , lowerCamelCase_ )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(lowerCamelCase_ )
hf_model.save_pretrained(lowerCamelCase_ )
if push_to_hub:
processor.push_to_hub(F'''nielsr/{model_name}''' )
hf_model.push_to_hub(F'''nielsr/{model_name}''' )
if __name__ == "__main__":
snake_case__ : int = argparse.ArgumentParser()
snake_case__ : Any = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
        help='''Name of the model to convert (must be one of the supported choices)''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
snake_case__ : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
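# Example invocation (a sketch; the script filename and output directory are hypothetical):
#   python convert_blip_2_original_to_pytorch.py \
#       --model_name blip2-opt-2.7b --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub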
'''simple docstring'''
def _lowerCamelCase ( lowerCamelCase_ : str , lowerCamelCase_ : str = " " ):
"""simple docstring"""
UpperCAmelCase_ : str = []
UpperCAmelCase_ : List[Any] = 0
for index, char in enumerate(lowerCamelCase_ ):
if char == separator:
split_words.append(string[last_index:index] )
UpperCAmelCase_ : Optional[Any] = index + 1
elif index + 1 == len(lowerCamelCase_ ):
split_words.append(string[last_index : index + 1] )
return split_words
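# Behaviour sketch (informal): for string='innovation in tech' with the default separator the
# loop yields ['innovation', 'in', 'tech']; note that, unlike str.split, a trailing separator
# does not produce a final empty segment.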
if __name__ == "__main__":
from doctest import testmod
testmod()
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json",
"google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json",
"google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """big_bird"""
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple=5_0_3_5_8 , UpperCamelCase__ : Tuple=7_6_8 , UpperCamelCase__ : Tuple=1_2 , UpperCamelCase__ : str=1_2 , UpperCamelCase__ : List[str]=3_0_7_2 , UpperCamelCase__ : List[str]="gelu_new" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[Any]=4_0_9_6 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Any=0.0_2 , UpperCamelCase__ : List[str]=1E-1_2 , UpperCamelCase__ : Union[str, Any]=True , UpperCamelCase__ : int=0 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : Union[str, Any]=2 , UpperCamelCase__ : Any=6_6 , UpperCamelCase__ : Dict="block_sparse" , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[str]=6_4 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : int=None , **UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
super().__init__(
pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , sep_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = use_cache
UpperCamelCase = rescale_embeddings
UpperCamelCase = attention_type
UpperCamelCase = use_bias
UpperCamelCase = block_size
UpperCamelCase = num_random_blocks
UpperCamelCase = classifier_dropout
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
@property
def A ( self : Tuple ):
"""simple docstring"""
if self.task == "multiple-choice":
UpperCamelCase = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
UpperCamelCase = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
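# For example, exporting with task='multiple-choice' declares the dynamic axes
# {0: 'batch', 1: 'choice', 2: 'sequence'} for both input_ids and attention_mask;
# any other task uses {0: 'batch', 1: 'sequence'}.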
'''simple docstring'''
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = 42
_SCREAMING_SNAKE_CASE = None
def __lowerCamelCase ( A__ , A__=0.999 , A__="cosine" , ) -> Tuple:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(A__ ):
return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(A__ ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
UpperCamelCase = []
for i in range(A__ ):
UpperCamelCase = i / num_diffusion_timesteps
UpperCamelCase = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(A__ ) / alpha_bar_fn(A__ ) , A__ ) )
return torch.tensor(A__ , dtype=torch.floataa )
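# For instance, calling the helper above with num_diffusion_timesteps=1000 and the default
# 'cosine' transform yields a monotonically increasing beta schedule whose entries are each
# capped at the default max_beta of 0.999.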
class SCREAMING_SNAKE_CASE ( _a , _a ):
"""simple docstring"""
@register_to_config
def __init__( self : List[str] , UpperCamelCase__ : int = 1_0_0_0 , UpperCamelCase__ : str = "fixed_small_log" , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[float] = 1.0 , UpperCamelCase__ : str = "epsilon" , UpperCamelCase__ : str = "squaredcos_cap_v2" , ):
"""simple docstring"""
if beta_schedule != "squaredcos_cap_v2":
raise ValueError('UnCLIPScheduler only supports `beta_schedule`: \'squaredcos_cap_v2\'' )
UpperCamelCase = betas_for_alpha_bar(UpperCamelCase__ )
UpperCamelCase = 1.0 - self.betas
UpperCamelCase = torch.cumprod(self.alphas , dim=0 )
UpperCamelCase = torch.tensor(1.0 )
# standard deviation of the initial noise distribution
UpperCamelCase = 1.0
# setable values
UpperCamelCase = None
UpperCamelCase = torch.from_numpy(np.arange(0 , UpperCamelCase__ )[::-1].copy() )
UpperCamelCase = variance_type
def A ( self : Dict , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None ):
"""simple docstring"""
return sample
def A ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Union[str, torch.device] = None ):
"""simple docstring"""
UpperCamelCase = num_inference_steps
UpperCamelCase = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
UpperCamelCase = (np.arange(0 , UpperCamelCase__ ) * step_ratio).round()[::-1].copy().astype(np.intaa )
UpperCamelCase = torch.from_numpy(UpperCamelCase__ ).to(UpperCamelCase__ )
def A ( self : Dict , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Tuple=None ):
"""simple docstring"""
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
# For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
# and sample from it to get previous sample
# x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
UpperCamelCase = beta_prod_t_prev / beta_prod_t * beta
if variance_type is None:
UpperCamelCase = self.config.variance_type
# hacks - were probably added for training stability
if variance_type == "fixed_small_log":
UpperCamelCase = torch.log(torch.clamp(UpperCamelCase__ , min=1E-2_0 ) )
UpperCamelCase = torch.exp(0.5 * variance )
elif variance_type == "learned_range":
# NOTE difference with DDPM scheduler
UpperCamelCase = variance.log()
UpperCamelCase = beta.log()
UpperCamelCase = (predicted_variance + 1) / 2
UpperCamelCase = frac * max_log + (1 - frac) * min_log
return variance
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : str=None , UpperCamelCase__ : bool = True , ):
"""simple docstring"""
UpperCamelCase = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
UpperCamelCase , UpperCamelCase = torch.split(UpperCamelCase__ , sample.shape[1] , dim=1 )
else:
UpperCamelCase = None
# 1. compute alphas, betas
if prev_timestep is None:
UpperCamelCase = t - 1
UpperCamelCase = self.alphas_cumprod[t]
UpperCamelCase = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
UpperCamelCase = 1 - alpha_prod_t
UpperCamelCase = 1 - alpha_prod_t_prev
if prev_timestep == t - 1:
UpperCamelCase = self.betas[t]
UpperCamelCase = self.alphas[t]
else:
UpperCamelCase = 1 - alpha_prod_t / alpha_prod_t_prev
UpperCamelCase = 1 - beta
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if self.config.prediction_type == "epsilon":
UpperCamelCase = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif self.config.prediction_type == "sample":
UpperCamelCase = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"""
' for the UnCLIPScheduler.' )
# 3. Clip "predicted x_0"
if self.config.clip_sample:
UpperCamelCase = torch.clamp(
UpperCamelCase__ , -self.config.clip_sample_range , self.config.clip_sample_range )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
UpperCamelCase = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
UpperCamelCase = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
UpperCamelCase = 0
if t > 0:
UpperCamelCase = randn_tensor(
model_output.shape , dtype=model_output.dtype , generator=UpperCamelCase__ , device=model_output.device )
UpperCamelCase = self._get_variance(
UpperCamelCase__ , predicted_variance=UpperCamelCase__ , prev_timestep=UpperCamelCase__ , )
if self.variance_type == "fixed_small_log":
UpperCamelCase = variance
elif self.variance_type == "learned_range":
UpperCamelCase = (0.5 * variance).exp()
else:
raise ValueError(
f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"""
' for the UnCLIPScheduler.' )
UpperCamelCase = variance * variance_noise
UpperCamelCase = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return UnCLIPSchedulerOutput(prev_sample=UpperCamelCase__ , pred_original_sample=UpperCamelCase__ )
def A ( self : int , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.FloatTensor , UpperCamelCase__ : torch.IntTensor , ):
"""simple docstring"""
UpperCamelCase = self.alphas_cumprod.to(device=original_samples.device , dtype=original_samples.dtype )
UpperCamelCase = timesteps.to(original_samples.device )
UpperCamelCase = alphas_cumprod[timesteps] ** 0.5
UpperCamelCase = sqrt_alpha_prod.flatten()
while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_alpha_prod.unsqueeze(-1 )
UpperCamelCase = (1 - alphas_cumprod[timesteps]) ** 0.5
UpperCamelCase = sqrt_one_minus_alpha_prod.flatten()
while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ):
UpperCamelCase = sqrt_one_minus_alpha_prod.unsqueeze(-1 )
UpperCamelCase = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
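# A minimal denoising-loop sketch for the scheduler above (informal; `model` and `sample`
# are hypothetical stand-ins, and the method names are the original diffusers names
# `set_timesteps`/`step` that the placeholder `A` definitions above correspond to):
#
#   scheduler.set_timesteps(25)
#   for t in scheduler.timesteps:
#       model_output = model(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample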
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __lowerCAmelCase ( unittest.TestCase):
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :Any = "ylacombe/bark-small"
lowercase :Dict = tempfile.mkdtemp()
lowercase :int = "en_speaker_1"
lowercase :Tuple = "This is a test string"
lowercase :Tuple = "speaker_embeddings_path.json"
lowercase :str = "speaker_embeddings"
def SCREAMING_SNAKE_CASE ( self: List[str] , **_lowerCAmelCase: str ):
return AutoTokenizer.from_pretrained(self.checkpoint , **_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: str ):
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE ( self: int ):
lowercase :Optional[Any] = self.get_tokenizer()
lowercase :List[str] = BarkProcessor(tokenizer=_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
lowercase :Optional[int] = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE ( self: Any ):
lowercase :List[Any] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase :Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowercase :Dict = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="(BOS)" , eos_token="(EOS)" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE ( self: Tuple ):
lowercase :str = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase :Optional[Any] = 35
lowercase :int = 2
lowercase :Optional[int] = 8
lowercase :int = {
"semantic_prompt": np.ones(_lowerCAmelCase ),
"coarse_prompt": np.ones((nb_codebooks_coarse, seq_len) ),
"fine_prompt": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase :int = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
lowercase :Optional[Any] = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase :Tuple = os.path.join(self.tmpdirname , "file.npz" )
np.savez(_lowerCAmelCase , **_lowerCAmelCase )
lowercase :Union[str, Any] = processor(text=self.input_string , voice_preset=_lowerCAmelCase )
lowercase :Any = inputs["history_prompt"]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(_lowerCAmelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase :Tuple = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :int = self.get_tokenizer()
lowercase :Tuple = BarkProcessor(tokenizer=_lowerCAmelCase )
lowercase :str = processor(text=self.input_string )
lowercase :Dict = tokenizer(
self.input_string , padding="max_length" , max_length=2_56 , add_special_tokens=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , return_token_type_ids=_lowerCAmelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
def UpperCAmelCase__ ( lowerCamelCase ):
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise TypeError("Input value must be an 'int' type" )
lowercase :int = 0
while number:
position += 1
number >>= 1
return position
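# Worked example (informal): for number = 8 (0b1000) the loop shifts right four times before
# the value reaches zero, so the function returns 4, the 1-indexed position of the highest set bit.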
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _snake_case ( UpperCAmelCase_ : int ):
A__ = os.path.join(args.tf_model_dir , """parameters.json""" )
A__ = json.loads(open(UpperCAmelCase__ ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(""".pt""" ):
A__ = args.output + """.pt"""
A__ = OrderedDict()
with tf.device("""/CPU:0""" ):
A__ = tf.train.load_checkpoint(args.tf_model_dir )
A__ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
A__ = reader.get_tensor(UpperCAmelCase__ ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
A__ = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
A__ = 8
A__ = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/moe""" ):
A__ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
A__ = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/softmlp/kernel""" ):
A__ = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
A__ = key_name[-9:-7]
for i in range(16 ):
A__ = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
A__ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/mlp""" ):
A__ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
A__ = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/p1/bias""" ):
A__ = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/p2/kernel""" ):
A__ = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/p2/bias""" ):
A__ = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/ln""" ):
A__ = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
A__ = """model.blocks.%d.feed_forward.norm.bias""" % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/g""" ):
A__ = """model.blocks.%d.feed_forward.norm.weight""" % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/att""" ):
A__ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
A__ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
A__ = state[:, 0, :, :]
A__ = state[:, 1, :, :]
A__ = state[:, 2, :, :]
A__ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
A__ = torch.tensor(UpperCAmelCase__ )
A__ = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
A__ = torch.tensor(UpperCAmelCase__ )
A__ = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/o/kernel""" ):
A__ = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
A__ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/an""" ):
A__ = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
A__ = """model.blocks.%d.self_attn.norm.bias""" % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/g""" ):
A__ = """model.blocks.%d.self_attn.norm.weight""" % player
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(UpperCAmelCase__ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
A__ = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
A__ = """model.%s.weight""" % nlayer
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(UpperCAmelCase__ )
if key_name.startswith("""model/wte""" ):
A__ = """lm_head.weight"""
A__ = vnp.copy() # same in embedded
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/wob""" ):
A__ = """final_logits_bias"""
A__ = vnp.copy() # same in embedded
A__ = state.reshape((1, -1) )
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name == "model/dense/kernel":
A__ = """model.last_project.weight"""
A__ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
A__ = torch.tensor(UpperCAmelCase__ )
elif key_name == "model/dense_1/bias":
A__ = """model.last_project.bias"""
A__ = vnp.copy() # same because it is one dimensional
A__ = torch.tensor(UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , args.output )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ : Tuple = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args)
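# Example invocation (a sketch; the script filename and both paths are hypothetical):
#   python convert_gptsan_tf_checkpoint_to_pytorch.py --tf_model_dir ./gptsan_tf_ckpt --output ./gptsan.pt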
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def lowerCamelCase ( UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Optional[int] ) -> Optional[int]:
lowercase_ : Union[str, Any] = hf_hub_url(repo_id=UpperCAmelCase__ , path=UpperCAmelCase__ , revision=UpperCAmelCase__ )
assert url == F'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(UpperCAmelCase__ )}'''
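# Concrete expansion of one parametrized case (informal): with repo_id='org-name/dataset-name',
# path='filename with blanks.csv' and revision='v2', the asserted URL is
# 'https://huggingface.co/datasets/org-name/dataset-name/resolve/v2/filename%20with%20blanks.csv',
# since quote() percent-encodes the blanks in the file name.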
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def UpperCamelCase_ ( A__ : list , A__ : list , A__ : list , A__ : list , A__ : list ):
'''simple docstring'''
lowerCAmelCase_ : Any = np.array([[1, item, train_mtch[i]] for i, item in enumerate(A__ )] )
lowerCAmelCase_ : Optional[Any] = np.array(A__ )
lowerCAmelCase_ : List[str] = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , A__ ) ) , x.transpose() ) , A__ )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def UpperCamelCase_ ( A__ : list , A__ : list , A__ : list ):
'''simple docstring'''
lowerCAmelCase_ : Tuple = (1, 2, 1)
lowerCAmelCase_ : Optional[Any] = (1, 1, 0, 7)
lowerCAmelCase_ : List[Any] = SARIMAX(
A__ , exog=A__ , order=A__ , seasonal_order=A__ )
lowerCAmelCase_ : Union[str, Any] = model.fit(disp=A__ , maxiter=6_00 , method="""nm""" )
lowerCAmelCase_ : Optional[int] = model_fit.predict(1 , len(A__ ) , exog=[test_match] )
return result[0]
def UpperCamelCase_ ( A__ : list , A__ : list , A__ : list ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = SVR(kernel="""rbf""" , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(A__ , A__ )
lowerCAmelCase_ : List[str] = regressor.predict(A__ )
return y_pred[0]
def UpperCamelCase_ ( A__ : list ):
'''simple docstring'''
train_user.sort()
    q_one = np.percentile(train_user , 25 )
    q_three = np.percentile(train_user , 75 )
    iqr = q_three - q_one
    low_lim = q_one - (iqr * 0.1)
return low_lim
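# Worked example (informal): for train_user = [2, 4, 4, 4, 5, 5, 7, 9], np.percentile gives
# q_one = 4.0 and q_three = 5.5, so iqr = 1.5 and the returned lower limit is 4.0 - 0.15 = 3.85.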
def UpperCamelCase_ ( A__ : list , A__ : float ):
'''simple docstring'''
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i ) - abs(actual_result ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
__A : Union[str, Any] = [[1_8231, 0.0, 1], [2_2621, 1.0, 2], [1_5675, 0.0, 3], [2_3583, 1.0, 4]]
__A : str = pd.DataFrame(
data_input, columns=["total_user", "total_even", "days"]
)
__A : Tuple = Normalizer().fit_transform(data_input_df.values)
# split data
__A : List[str] = normalize_df[:, 2].tolist()
__A : Optional[int] = normalize_df[:, 0].tolist()
__A : Optional[int] = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
__A : Optional[Any] = normalize_df[:, [1, 2]].tolist()
__A : Dict = x[: len(x) - 1]
__A : Union[str, Any] = x[len(x) - 1 :]
# for linear regression & sarimax
__A : List[Any] = total_date[: len(total_date) - 1]
__A : Dict = total_user[: len(total_user) - 1]
__A : Optional[int] = total_match[: len(total_match) - 1]
__A : Optional[int] = total_date[len(total_date) - 1 :]
__A : Tuple = total_user[len(total_user) - 1 :]
__A : Optional[int] = total_match[len(total_match) - 1 :]
# voting system with forecasting
__A : str = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
__A : Optional[Any] = "" if data_safety_checker(res_vote, tst_user) else "not "
print("Today's data is {not_str}safe.")
'''simple docstring'''
from pathlib import Path
import cva
import numpy as np
from matplotlib import pyplot as plt
def UpperCamelCase_ ( A__ : np.ndarray , A__ : np.ndarray , A__ : np.ndarray , A__ : int , A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = cva.getAffineTransform(A__ , A__ )
return cva.warpAffine(A__ , A__ , (rows, cols) )
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows , img_cols = gray_img.shape
    # set different points to rotate image
    ptsa = np.array([[50, 50], [200, 50], [50, 200]], np.floataa)
    ptsb = np.array([[10, 100], [200, 50], [100, 250]], np.floataa)
    ptsc = np.array([[50, 50], [150, 50], [120, 200]], np.floataa)
    ptsd = np.array([[10, 100], [80, 50], [180, 250]], np.floataa)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, ptsa, ptsb, img_rows, img_cols),
        get_rotation(gray_img, ptsb, ptsc, img_rows, img_cols),
        get_rotation(gray_img, ptsb, ptsd, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : Tuple = logging.get_logger(__name__)
lowerCamelCase__ : int = {}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "llama"
lowercase_ = ["past_key_values"]
def __init__( self : List[Any] , _lowerCAmelCase : Optional[Any]=32_000 , _lowerCAmelCase : Optional[int]=4_096 , _lowerCAmelCase : str=11_008 , _lowerCAmelCase : Optional[Any]=32 , _lowerCAmelCase : Tuple=32 , _lowerCAmelCase : List[str]=None , _lowerCAmelCase : List[str]="silu" , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : Dict=1E-6 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : Any=0 , _lowerCAmelCase : Tuple=1 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : str=1 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : List[Any]=None , **_lowerCAmelCase : Dict , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = num_key_value_heads
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = rms_norm_eps
SCREAMING_SNAKE_CASE_ = pretraining_tp
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , **_lowerCAmelCase , )
def lowerCAmelCase_ ( self : List[Any] ):
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _lowerCAmelCase ) or len(self.rope_scaling ) != 2:
raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
F"got {self.rope_scaling}" )
SCREAMING_SNAKE_CASE_ = self.rope_scaling.get('type' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = self.rope_scaling.get('factor' , _lowerCAmelCase )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
if rope_scaling_factor is None or not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}" )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ : List[str] = logging.get_logger(__name__)
lowerCamelCase__ : List[str] = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "speech_to_text"
lowercase_ = ["past_key_values"]
lowercase_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Tuple , _lowerCAmelCase : List[Any]=10_000 , _lowerCAmelCase : List[Any]=12 , _lowerCAmelCase : Union[str, Any]=2_048 , _lowerCAmelCase : Optional[int]=4 , _lowerCAmelCase : Union[str, Any]=6 , _lowerCAmelCase : Optional[int]=2_048 , _lowerCAmelCase : Optional[Any]=4 , _lowerCAmelCase : Any=0.0 , _lowerCAmelCase : Optional[Any]=0.0 , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : int="relu" , _lowerCAmelCase : Union[str, Any]=256 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : str=0.0 , _lowerCAmelCase : Tuple=0.0 , _lowerCAmelCase : List[str]=0.02 , _lowerCAmelCase : List[str]=2 , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : List[str]=1 , _lowerCAmelCase : str=0 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Union[str, Any]=6_000 , _lowerCAmelCase : List[str]=1_024 , _lowerCAmelCase : str=2 , _lowerCAmelCase : Optional[Any]=(5, 5) , _lowerCAmelCase : str=1_024 , _lowerCAmelCase : str=80 , _lowerCAmelCase : Tuple=1 , **_lowerCAmelCase : Any , ):
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = encoder_ffn_dim
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = encoder_attention_heads
SCREAMING_SNAKE_CASE_ = decoder_ffn_dim
SCREAMING_SNAKE_CASE_ = decoder_layers
SCREAMING_SNAKE_CASE_ = decoder_attention_heads
SCREAMING_SNAKE_CASE_ = dropout
SCREAMING_SNAKE_CASE_ = attention_dropout
SCREAMING_SNAKE_CASE_ = activation_dropout
SCREAMING_SNAKE_CASE_ = activation_function
SCREAMING_SNAKE_CASE_ = init_std
SCREAMING_SNAKE_CASE_ = encoder_layerdrop
SCREAMING_SNAKE_CASE_ = decoder_layerdrop
SCREAMING_SNAKE_CASE_ = use_cache
SCREAMING_SNAKE_CASE_ = encoder_layers
SCREAMING_SNAKE_CASE_ = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE_ = max_source_positions
SCREAMING_SNAKE_CASE_ = max_target_positions
SCREAMING_SNAKE_CASE_ = num_conv_layers
SCREAMING_SNAKE_CASE_ = list(_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = conv_channels
SCREAMING_SNAKE_CASE_ = input_feat_per_channel
SCREAMING_SNAKE_CASE_ = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F"but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, "
F"`config.num_conv_layers = {self.num_conv_layers}`." )
super().__init__(
pad_token_id=_lowerCAmelCase , bos_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , is_encoder_decoder=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , )
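# Consistency example (informal): the defaults above, conv_kernel_sizes=(5, 5) with
# num_conv_layers=2, satisfy the length check in __init__, whereas conv_kernel_sizes=(5,)
# with num_conv_layers=2 would raise the ValueError instead.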
def lowerCamelCase ( SCREAMING_SNAKE_CASE = 1_000 ):
'''simple docstring'''
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
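# Worked example (informal): solution(5) sums 2*3*1 + 2*4*1 + 2*5*2 = 6 + 8 + 20 = 34.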
if __name__ == "__main__":
print(solution())
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class lowerCamelCase_ :
'''simple docstring'''
def __init__( self , __lowercase , ) -> Optional[Any]:
__UpperCamelCase :Optional[int] = parent
__UpperCamelCase :Optional[int] = 13
__UpperCamelCase :Dict = 7
__UpperCamelCase :Optional[int] = True
__UpperCamelCase :List[str] = True
__UpperCamelCase :List[Any] = True
__UpperCamelCase :Union[str, Any] = True
__UpperCamelCase :Any = True
__UpperCamelCase :Optional[int] = False
__UpperCamelCase :Any = False
__UpperCamelCase :str = False
__UpperCamelCase :Optional[Any] = 2
__UpperCamelCase :Optional[int] = 99
__UpperCamelCase :Any = 0
__UpperCamelCase :List[Any] = 32
__UpperCamelCase :int = 2
__UpperCamelCase :Optional[Any] = 4
__UpperCamelCase :Dict = 0.1
__UpperCamelCase :Optional[Any] = 0.1
__UpperCamelCase :str = 512
__UpperCamelCase :Any = 16
__UpperCamelCase :str = 2
__UpperCamelCase :Dict = 0.02
__UpperCamelCase :List[Any] = 3
__UpperCamelCase :Optional[int] = 4
__UpperCamelCase :Tuple = '''last'''
__UpperCamelCase :Any = True
__UpperCamelCase :Union[str, Any] = None
__UpperCamelCase :Tuple = 0
def UpperCamelCase__ ( self) -> Dict:
__UpperCamelCase :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__UpperCamelCase :Any = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa)
__UpperCamelCase :Tuple = None
if self.use_input_lengths:
__UpperCamelCase :List[str] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
__UpperCamelCase :Any = None
if self.use_token_type_ids:
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
__UpperCamelCase :Dict = None
__UpperCamelCase :List[str] = None
__UpperCamelCase :Union[str, Any] = None
if self.use_labels:
__UpperCamelCase :Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
__UpperCamelCase :int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__UpperCamelCase :List[str] = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa)
__UpperCamelCase :Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
__UpperCamelCase :Optional[int] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name):
        if (
            pipeline_test_case_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
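A minimal standalone sketch of the forward pass the integration test above exercises (assumes TensorFlow and network access to the checkpoint; the token ids and expected shape are taken from the test itself):

import tensorflow as tf
from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
last_hidden_state = model(input_ids)[0]  # shape (1, 8, 512)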
| 105
| 0
|
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    '''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    '''simple docstring'''
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]], dtype=np.float32)
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
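A hedged usage sketch of the Flax checks above (assumes JAX/Flax are installed and the checkpoint is reachable; output shape follows the model config, not an independent fact):

import numpy as np
from transformers import FlaxRobertaPreLayerNormModel

model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
input_ids = np.array([[0, 31414, 232, 2]], dtype="int32")  # arbitrary short sequence
last_hidden_state = model(input_ids)[0]  # (batch, seq_len, hidden_size)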
| 48
|
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    """simple docstring"""
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)

def make_linear_from_emb(emb):
    """simple docstring"""
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer

def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    """simple docstring"""
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]
    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}
    config = XGLMConfig(
        vocab_size=vocab_size, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="gelu", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
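A typical invocation of this conversion script from the shell (the script file name here is illustrative, not taken from the source):

# python convert_xglm_original_ckpt_to_trfms.py /path/to/fairseq/model.pt ./xglm-hf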
| 274
| 0
|
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None

def interpolation_search_by_recursion(sorted_collection, item, left, right):
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, point)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(
                sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(
                sorted_collection, item, point + 1, right)

def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True

if __name__ == "__main__":
    import sys

    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    try:
        __assert_sorted(collection)
    except ValueError:
        sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
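For keys that are roughly uniformly distributed, the probe lands near the true position and the search takes O(log log n) comparisons on average (O(n) in the worst case). A quick check of both entry points:

assert interpolation_search([1, 5, 9, 13, 17, 21], 13) == 3
assert interpolation_search([1, 5, 9, 13, 17, 21], 4) is None
assert interpolation_search_by_recursion([1, 5, 9, 13, 17, 21], 17, 0, 5) == 4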
| 351
|
'''simple docstring'''
from math import factorial, pi
def maclaurin_sin(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_sin() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_sin() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy))

def maclaurin_cos(theta, accuracy=30):
    if not isinstance(theta, (int, float)):
        raise ValueError("maclaurin_cos() requires either an int or float for theta")
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError("maclaurin_cos() requires a positive int for accuracy")
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
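A quick sanity check against the standard library (tolerances are illustrative; 30 terms are far more than needed once theta has been reduced modulo 2*pi):

from math import cos, isclose, sin
assert isclose(maclaurin_sin(10), sin(10), abs_tol=1e-9)
assert isclose(maclaurin_cos(-5), cos(-5), abs_tol=1e-9)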
| 96
| 0
|
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt

def make_highpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt

def make_bandpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt

def make_allpass(frequency, samplerate, q_factor=1 / sqrt(2)):
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt

def make_peak(frequency, samplerate, gain_db, q_factor=1 / sqrt(2), ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt

def make_lowshelf(frequency, samplerate, gain_db, q_factor=1 / sqrt(2), ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt

def make_highshelf(frequency, samplerate, gain_db, q_factor=1 / sqrt(2), ):
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
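A hedged usage sketch (assumes IIRFilter exposes a per-sample process() method, as in the accompanying audio_filters.iir_filter module):

filt = make_lowpass(frequency=1_000, samplerate=48_000)
filtered = [filt.process(sample) for sample in (0.0, 1.0, 0.5, -0.5)]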
| 174
|
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args(unknown_args):
    '''simple docstring'''
    return {key.lstrip("-"): value for key, value in zip(unknown_args[::2], unknown_args[1::2])}

def main():
    '''simple docstring'''
    parser = ArgumentParser(
        "HuggingFace Datasets CLI tool", usage="datasets-cli <command> [<args>]", allow_abbrev=False)
    commands_parser = parser.add_subparsers(help="datasets-cli command helpers")
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    TestCommand.register_subcommand(commands_parser)
    RunBeamCommand.register_subcommand(commands_parser)
    DummyDataCommand.register_subcommand(commands_parser)
    # Parse args
    args, unknown_args = parser.parse_known_args()
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)
    kwargs = parse_unknown_args(unknown_args)
    # Run
    service = args.func(args, **kwargs)
    service.run()
if __name__ == "__main__":
main()
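Typical invocations of the resulting console entry point (subcommand names follow the registrations above; the flags shown are illustrative):

# datasets-cli env
# datasets-cli test ./my_dataset --save_infos --all_configs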
| 158
| 0
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)
T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
't5-small': 'https://huggingface.co/t5-small/resolve/main/config.json',
't5-base': 'https://huggingface.co/t5-base/resolve/main/config.json',
't5-large': 'https://huggingface.co/t5-large/resolve/main/config.json',
't5-3b': 'https://huggingface.co/t5-3b/resolve/main/config.json',
't5-11b': 'https://huggingface.co/t5-11b/resolve/main/config.json',
}
class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(self, vocab_size=32128, d_model=512, d_kv=64, d_ff=2048, num_layers=6, num_decoder_layers=None, num_heads=8, relative_attention_num_buckets=32, relative_attention_max_distance=128, dropout_rate=0.1, layer_norm_epsilon=1e-6, initializer_factor=1.0, feed_forward_proj="relu", is_encoder_decoder=True, use_cache=True, pad_token_id=0, eos_token_id=1, **kwargs, ):
        '''simple docstring'''
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'")
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs, )
class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        '''simple docstring'''
        return 13
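A short illustration of the feed_forward_proj parsing above; the expected attribute values follow directly from the constructor logic:

config = T5Config(feed_forward_proj="gated-gelu")
assert config.is_gated_act and config.dense_act_fn == "gelu_new"  # remapped for backwards compatibility
config = T5Config(feed_forward_proj="relu")
assert not config.is_gated_act and config.dense_act_fn == "relu"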
| 251
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_instructblip': [
'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InstructBlipConfig',
'InstructBlipQFormerConfig',
'InstructBlipVisionConfig',
],
'processing_instructblip': ['InstructBlipProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'InstructBlipQFormerModel',
'InstructBlipPreTrainedModel',
'InstructBlipForConditionalGeneration',
'InstructBlipVisionModel',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
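With the _LazyModule indirection, importing the package itself stays cheap; the torch-backed symbols listed in _import_structure are only materialised on first attribute access, e.g. (illustrative):

from transformers.models.instructblip import InstructBlipProcessor  # triggers the real import lazily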
| 251
| 1
|
def solution(n: int = 4_000_000) -> int:
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f"""{solution() = }""")
| 52
|
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""), )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
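Outside the test harness the same switch is a plain environment variable, which must be set before transformers is imported (sketch):

import os
os.environ["TRANSFORMERS_OFFLINE"] = "1"  # equivalent to: TRANSFORMERS_OFFLINE=1 python my_script.py
from transformers import BertModel
model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")  # served from the local cache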
| 89
| 0
|
'''simple docstring'''
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device

from .test_schedulers import SchedulerCommonTest

class KDPM2DiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934e-07) < 1e-2
            assert abs(result_mean.item() - 6.1112e-10) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972e-07) < 1e-2
            assert abs(result_mean.item() - 0.0002) < 1e-3

    def test_full_loop_no_noise(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3

    def test_full_loop_device(self):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        if str(torch_device).startswith("cpu"):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
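A minimal sampling-loop sketch mirroring the pattern these tests exercise (the zero "model output" is a stand-in for a real denoiser, so the result is not meaningful, only the API flow):

import torch
from diffusers import KDPM2DiscreteScheduler

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1100, beta_start=0.0001, beta_end=0.02, beta_schedule="linear")
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.zeros_like(model_input)  # stand-in for a denoising model
    sample = scheduler.step(noise_pred, t, sample).prev_sample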
| 67
|
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0) != 0)

def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
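NAND is functionally complete, so the other basic gates can be derived from it, for example:

def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))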
| 67
| 1
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    '''simple docstring'''
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
def rename_key(name):
    '''simple docstring'''
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
                if "message_attn" in key:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.message_attn.v_proj.bias"] = val[-dim:]
                else:
                    if "weight" in key:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                    else:
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                        orig_state_dict[f"vision_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
                if "weight" in key:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"mit.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
                if "weight" in key:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.weight"] = val[:dim, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.weight"] = val[-dim:, :]
                else:
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.q_proj.bias"] = val[:dim]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.k_proj.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"text_model.encoder.layers.{layer_num}.self_attn.v_proj.bias"] = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
def prepare_video(num_frames):
    '''simple docstring'''
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset", )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    '''simple docstring'''
    model_to_url = {
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True)

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
snake_case_ :Union[str, Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
snake_case_ :str = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
snake_case_ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
snake_case_ :Any = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
snake_case_ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
snake_case_ :Tuple = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
snake_case_ :Union[str, Any] = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
snake_case_ :List[Any] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
snake_case_ :Dict = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
snake_case_ :Union[str, Any] = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
snake_case_ :str = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
snake_case_ :str = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
snake_case_ :int = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
snake_case_ :Optional[int] = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
snake_case_ :Any = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
snake_case_ :Tuple = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
snake_case_ :Union[str, Any] = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"""Model name {model_name} not supported""" )
assert torch.allclose(_lowercase, _lowercase, atol=1e-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_lowercase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(_lowercase, organization="""nielsr""" )
processor.push_to_hub(_lowercase, organization="""nielsr""" )
slow_tokenizer.push_to_hub(_lowercase, organization="""nielsr""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
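Example invocation (the script file name is illustrative):

# python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 --pytorch_dump_folder_path ./xclip-base-patch32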
| 66
|
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, Transformer2DModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __UpperCamelCase ( a__ ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = None , ) -> int:
super().__init__()
self.register_modules(transformer=lowerCAmelCase__ , vae=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
# create a imagenet -> id dictionary for easier use
a : str = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
a : int = int(lowerCAmelCase__ )
a : Any = dict(sorted(self.labels.items() ) )
def __a ( self , lowerCAmelCase__ ) -> List[int]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
a : Optional[Any] = list(lowerCAmelCase__ )
for l in label:
if l not in self.labels:
raise ValueError(
f"""{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.""" )
return [self.labels[l] for l in label]
    @torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)

                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)

                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample

        samples = (samples / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            samples = self.numpy_to_pil(samples)

        if not return_dict:
            return (samples,)

        return ImagePipelineOutput(images=samples)
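# Minimal usage sketch for the pipeline defined above. It assumes the class is
# exported as `DiTPipeline` and that a compatible checkpoint such as
# "facebook/DiT-XL-2-256" exists on the Hub (both assumptions mirror the
# upstream diffusers examples, not something this file guarantees):
#
#   import torch
#   from diffusers import DiTPipeline
#
#   pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
#   pipe = pipe.to("cuda")
#   class_ids = pipe.get_label_ids(["white shark", "umbrella"])
#   images = pipe(class_labels=class_ids, num_inference_steps=25).images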
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267735,
        cutoffs=[20000, 40000, 200000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        # Message copied from Transformer-XL documentation
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        # Message copied from Transformer-XL documentation
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
'''word_embeddings_layernorm.weight''',
'''word_embeddings_layernorm.bias''',
'''input_layernorm.weight''',
'''input_layernorm.bias''',
'''post_attention_layernorm.weight''',
'''post_attention_layernorm.bias''',
'''self_attention.dense.bias''',
'''mlp.dense_4h_to_h.bias''',
'''ln_f.weight''',
'''ln_f.bias''',
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
'''mlp.dense_4h_to_h.weight''',
'''self_attention.dense.weight''',
]
def layer_name_mapping(key, file):
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
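# Worked example for the mapping above (hypothetical Megatron-DeepSpeed shard
# name): files "layer_00".."layer_02" hold embeddings/norms, hence the `-= 3`
# offset, so
#   layer_name_mapping("self_attention.dense.weight", "layer_05-model_00-model_states.pt")
# returns "h.2.self_attention.dense.weight".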
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
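# A few concrete values from get_dtype_size (torch dtypes stringify as e.g.
# "torch.float16", so the regex reads the trailing bit width):
#   get_dtype_size(torch.float16) -> 2
#   get_dtype_size(torch.int64)   -> 8
#   get_dtype_size(torch.bool)    -> 0.125  (packed 1-bit accounting)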
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
    # Construct model
    if bloom_config_file == "":
        config = BloomConfig()
    else:
        config = BloomConfig.from_json_file(bloom_config_file)

    if shard_model:
        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        index_dict = {"weight_map": {}, "metadata": {}}
        total_size = 0

        missing_keys = None
        config = BloomConfig()

        for j, file in enumerate(file_names):
            print("Processing file: {}".format(file))
            tensors = None

            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp
            torch.save(
                tensors,
                os.path.join(
                    pytorch_dump_folder_path,
                    "pytorch_model_{}-of-{}.bin".format(str(j + 1).zfill(5), str(len(file_names)).zfill(5)),
                ),
            )

            for key in tensors.keys():
                value = tensors[key]
                total_size += value.numel() * get_dtype_size(value.dtype)
                if key not in index_dict["weight_map"]:
                    index_dict["weight_map"][key] = "pytorch_model_{}-of-{}.bin".format(
                        str(j + 1).zfill(5), str(len(file_names)).zfill(5)
                    )

        config = BloomConfig()
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        index_dict["metadata"]["total_size"] = total_size
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
        with open(os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME + ".index.json"), "w", encoding="utf-8") as f:
            json_config = json.dumps(index_dict, indent=2, sort_keys=True) + "\n"
            f.write(json_config)
    else:
        model = BloomModel(config)

        file_names = os.listdir(bloom_checkpoint_path)
        file_names = sorted(filter(lambda s: s.startswith("layer") and "model_00" in s, file_names))

        missing_keys = None
        for i, file in enumerate(file_names):
            tensors = None
            for i in range(pretraining_tp):
                # load all TP files
                f_name = file.replace("model_00", f"model_0{i}")
                temp = torch.load(os.path.join(bloom_checkpoint_path, f_name), map_location="cpu")

                # Rename keys in the transformers names
                keys = list(temp.keys())
                for key in keys:
                    temp[layer_name_mapping(key, file)] = temp.pop(key)

                if tensors is None:
                    tensors = temp
                else:
                    for key in tensors.keys():
                        # We average (sum and then divide) some weights across TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
                        if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                            tensors[key] += temp[key]
                        else:
                            # Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
                            cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN) else 0
                            # We concatenate these weights across TP ranks
                            tensors[key] = torch.cat([tensors[key], temp[key]], dim=cat_dim)

            # Divide by the number of TP the weights we want to average
            for key in tensors.keys():
                if any(key.endswith(end) for end in WEIGHTS_TO_AVERAGE_ENDSWITH):
                    tensors[key] = tensors[key] / pretraining_tp

            other_keys = model.load_state_dict(tensors, strict=False)
            assert not other_keys.unexpected_keys, f"The keys {other_keys.unexpected_keys} are unexpected"
            if missing_keys is None:
                missing_keys = set(other_keys.missing_keys)
            else:
                missing_keys = missing_keys.intersection(set(other_keys.missing_keys))
        assert not missing_keys, f"The keys {missing_keys} are missing"

        # Save pytorch-model
        os.makedirs(pytorch_dump_folder_path, exist_ok=True)
        pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
        pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
        print(f"Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}")
        if config.torch_dtype is not None:
            model = model.to(config.torch_dtype)
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {pytorch_config_dump_path}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bloom_checkpoint_path''',
default=None,
type=str,
required=True,
help='''Path to the Megatron-LM checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--bloom_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--shard_model''',
action='''store_true''',
help='''An optional setting to shard the output model \nThis enables sharding the converted checkpoint''',
)
parser.add_argument(
'''--pretraining_tp''',
default=4,
type=int,
help='''Pretraining TP rank that has been used when training the model in Megatron-LM \n''',
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
from manim import *
class Stage2(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =VGroup(__snake_case , __snake_case ).arrange(__snake_case , buff=0 )
__a =Text('CPU' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__snake_case )
__a =[mem.copy() for i in range(4 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =Text('GPU' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
gpu.move_to([-1, -1, 0] )
self.add(__snake_case )
__a =[mem.copy() for i in range(6 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =Text('Model' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , buff=0.5 , aligned_edge=__snake_case )
model.move_to([3, -1.0, 0] )
self.add(__snake_case )
        cpu_targs = []
for i, rect in enumerate(__snake_case ):
rect.set_stroke(__snake_case )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__a =Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(__snake_case , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=__snake_case )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=__snake_case , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=__snake_case , buff=0.0 )
self.add(__snake_case )
cpu_targs.append(__snake_case )
__a =[mem.copy() for i in range(6 )]
__a =VGroup(*__snake_case ).arrange(__snake_case , buff=0 )
__a =Text('Loaded Checkpoint' , font_size=24 )
__a =Group(__snake_case , __snake_case ).arrange(__snake_case , aligned_edge=__snake_case , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__a =Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__a =MarkupText(
f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(__snake_case , __snake_case )
__a =MarkupText(
f'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(__snake_case , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__a =MarkupText(
f'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(__snake_case ) , Write(__snake_case ) )
self.play(Write(__snake_case , run_time=1 ) , Create(__snake_case , run_time=1 ) )
        first_animations = []
        second_animations = []
for i, rect in enumerate(__snake_case ):
            target = fill.copy().set_fill(YELLOW, opacity=0.7)
target.move_to(__snake_case )
first_animations.append(GrowFromCenter(__snake_case , run_time=1 ) )
__a =target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(__snake_case , run_time=1.5 ) )
self.play(*__snake_case )
self.play(*__snake_case )
self.wait()
"""simple docstring"""
def is_arithmetic_series(series: list) -> bool:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    if len(series) == 1:
        return True
    common_diff = series[1] - series[0]
    for index in range(len(series) - 1):
        if series[index + 1] - series[index] != common_diff:
            return False
    return True


def arithmetic_mean(series: list) -> float:
    if not isinstance(series, list):
        raise ValueError("Input series is not valid, valid series - [2, 4, 6]")
    if len(series) == 0:
        raise ValueError("Input list must be a non empty list")
    answer = 0
    for val in series:
        answer += val
    return answer / len(series)
if __name__ == "__main__":
import doctest
doctest.testmod()
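# A quick executable sketch of the two helpers above:
if __name__ == "__main__":
    assert is_arithmetic_series([2, 4, 6])  # common difference 2
    assert not is_arithmetic_series([2, 4, 7])
    assert arithmetic_mean([2, 4, 6]) == 4.0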
'''simple docstring'''
__snake_case ="""0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
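# Minimal usage sketch of the public API re-exported above (the canonical
# three-line Accelerate integration; `model`, `optimizer`, and `dataloader`
# are assumed to exist already):
#
#   from accelerate import Accelerator
#
#   accelerator = Accelerator()
#   model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
#   for batch in dataloader:
#       loss = model(**batch).loss
#       accelerator.backward(loss)
#       optimizer.step()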
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"""configuration_swiftformer""": [
"""SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SwiftFormerConfig""",
"""SwiftFormerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
"""SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SwiftFormerForImageClassification""",
"""SwiftFormerModel""",
"""SwiftFormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250

        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )

        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)

        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))

        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)

        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))

        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)

        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)

        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)

        self.assertEqual(len(stopping_criteria), 1)
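# The criteria exercised above are what `generate()` polls once per decoding
# step; a hand-rolled equivalent loop looks roughly like this (sketch only,
# `next_step` is a hypothetical decode function):
#
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10)])
#   while not criteria(input_ids, scores):
#       input_ids, scores = next_step(input_ids)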
'''simple docstring'''
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, images=None, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_token_type_ids=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
import numpy as np
import datasets
_DESCRIPTION = '''
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
'''
_CITATION = '''\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
'''
_KWARGS_DESCRIPTION = '''
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
mahalanobis: The Mahalonobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{\'mahalanobis\': array([0.5])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _lowercase ( _UpperCAmelCase = "isbn/0140328726" ) -> dict:
lowerCamelCase =olid.strip().strip("""/""" ) # Remove leading/trailing whitespace & slashes
if new_olid.count("""/""" ) != 1:
lowerCamelCase =F"""{olid} is not a valid Open Library olid"""
raise ValueError(_UpperCAmelCase )
return requests.get(F"""https://openlibrary.org/{new_olid}.json""" ).json()
def _lowercase ( _UpperCAmelCase ) -> dict:
lowerCamelCase ={
"""title""": """Title""",
"""publish_date""": """Publish date""",
"""authors""": """Authors""",
"""number_of_pages""": """Number of pages:""",
"""first_sentence""": """First sentence""",
"""isbn_10""": """ISBN (10)""",
"""isbn_13""": """ISBN (13)""",
}
lowerCamelCase ={better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
lowerCamelCase =[
get_openlibrary_data(author["""key"""] )["""name"""] for author in data["""Authors"""]
]
lowerCamelCase =data["""First sentence"""]["""value"""]
for key, value in data.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase =""", """.join(_UpperCAmelCase )
return data
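# The two helpers chain together; this performs a live network call, and the
# title shown is only what Open Library currently returns for this example ISBN:
#
#   book = summarize_book(get_openlibrary_data("isbn/0140328726"))
#   print(book["Title"])  # e.g. "Matilda"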
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
continue
print(F"\nSearching Open Library for ISBN: {isbn}...\n")
try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
print('''\n'''.join(F"{key}: {value}" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"Sorry, there are no results for ISBN: {isbn}.")
'''simple docstring'''
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
hf_table_format = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
failed = []
group_info = []

no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
with open(log, "r") as f:
for line in f:
            line = json.loads(line)
if line.get("nodeid", "") != "":
__UpperCAmelCase =line["nodeid"]
if line.get("duration", None) is not None:
__UpperCAmelCase =f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
    failed = []
log.unlink()
__UpperCAmelCase =""
__UpperCAmelCase =[]
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            filesafailed = {}
for test in failed_tests:
__UpperCAmelCase =test[0].split("::")
__UpperCAmelCase =data[0].split("/")[-1]
if data[0] not in filesafailed:
                    filesafailed[data[0]] = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
# Count number of instances in failed_tests
            table = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
            failed_table = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_0_0_0:
__UpperCAmelCase ="Too many failed tests, please see the full report in the Action results."
__UpperCAmelCase =len(err) + 1_0
__UpperCAmelCase =message[: 3_0_0_0 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
__UpperCAmelCase ="No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
        md_report = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
        action_button = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
        date_report = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
        response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
        ts = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__UpperCAmelCase =""
for i, row in enumerate(test_failures):
if row[0] != test_class:
                        test_class = row[0]
else:
__UpperCAmelCase =""
                payload = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"Converting {name}...")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)

        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits

    assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"Pushed {checkpoint_name}")
def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)
    names_to_hidden_sizes = {
'levit-128S': 1_28,
'levit-128': 1_28,
'levit-192': 1_92,
'levit-256': 2_56,
'levit-384': 3_84,
}
    names_to_config = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0, ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0, ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1, ),
}
    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
def perfect(number: int) -> bool:
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
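# Worked examples: a perfect number equals the sum of its proper divisors,
# e.g. 6 = 1 + 2 + 3 and 28 = 1 + 2 + 4 + 7 + 14.
if __name__ == "__main__":
    assert perfect(6)
    assert perfect(28)
    assert not perfect(27)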
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__UpperCAmelCase = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
'''simple docstring'''
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str) -> None:
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ['key_proj', 'value_proj', 'query_proj']

    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
for key in loading_info["missing_keys"]:
_a : Union[str, Any] = key.split('.' )
if attributes[0] == "lm_head":
_a : Optional[Any] = prophet
_a : str = prophet_old
else:
_a : List[str] = prophet.prophetnet
_a : Optional[Any] = prophet_old.model
_a : Optional[Any] = False
for attribute in attributes:
if attribute in mapping:
_a : Tuple = mapping[attribute]
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) and len(lowerCAmelCase_ ) > 0:
_a : Tuple = attribute
elif hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
_a : Optional[int] = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
_a : Any = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
_a : List[Any] = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
_a : Optional[Any] = old_model.bias
logger.info(f"""{attribute} is initialized""" )
_a : List[str] = True
break
elif attribute in special_keys and hasattr(lowerCAmelCase_ , 'in_proj_weight' ):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
_a : Dict = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
_a : Tuple = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
_a : str = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
_a : Optional[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
_a : Optional[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
_a : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
_a : Dict = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
_a : Union[str, Any] = nn.Parameter(old_model.embed_positions.weight[:512, :] )
_a : Optional[int] = True
break
if attribute.isdigit():
_a : Tuple = model[int(lowerCAmelCase_ )]
_a : Dict = old_model[int(lowerCAmelCase_ )]
else:
_a : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if old_attribute == "":
_a : Optional[Any] = old_model
else:
if not hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
_a : List[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(lowerCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--prophetnet_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
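# Example invocation (a sketch; the script filename and checkpoint paths are
# hypothetical):
#
#   python convert_prophetnet_original_pytorch_checkpoint_to_pytorch.py \
#       --prophetnet_checkpoint_path ./prophetnet_old \
#       --pytorch_dump_folder_path ./prophetnet_converted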
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class TFLEDModelTester:
    config_cls = LEDConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        attention_window=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.attention_window = attention_window
        # `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
        # [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
        # returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
        # because its local attention only attends to `self.attention_window` and one before and one after
        self.key_length = self.attention_window + 2

        # because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
        # the `test_attention_outputs` and `test_hidden_states_output` tests
        self.encoder_seq_length = (
            self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
        )
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, attention_window=self.attention_window, **self.config_updates,
        )
        inputs_dict = prepare_led_inputs_dict(config, input_ids, decoder_input_ids)
        global_attention_mask = tf.concat(
            [tf.zeros_like(input_ids)[:, :-1], tf.ones_like(input_ids)[:, -1:]], axis=-1,
        )
        inputs_dict["global_attention_mask"] = global_attention_mask
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFLEDModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_led_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class TFLEDModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
    all_generative_model_classes = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFLEDForConditionalGeneration,
            "feature-extraction": TFLEDModel,
            "summarization": TFLEDForConditionalGeneration,
            "text2text-generation": TFLEDForConditionalGeneration,
            "translation": TFLEDForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFLEDModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LEDConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_attention_outputs(self):
'''simple docstring'''
a , a = self.model_tester.prepare_config_and_inputs_for_common()
a = tf.zeros_like(inputs_dict["""attention_mask"""] )
a = 2
a = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict["""global_attention_mask"""] , )
a = True
a = self.model_tester.seq_length
a = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(__magic_name__ :int ):
a = outputs.decoder_attentions
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(__magic_name__ :Any ):
a = [t.numpy() for t in outputs.encoder_attentions]
a = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(__magic_name__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
a = True
a = False
a = False
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
a = len(__magic_name__ )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
if self.is_encoder_decoder:
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_decoder_attentions_output(__magic_name__ )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a = True
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
# Check attention is always last and order is fine
a = True
a = True
a = model_class(__magic_name__ )
a = model(self._prepare_for_class(__magic_name__ , __magic_name__ ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__magic_name__ ) )
self.assertEqual(model.config.output_hidden_states , __magic_name__ )
check_encoder_attentions_output(__magic_name__ )
@unittest.skip("""LED keeps using potentially symbolic tensors in conditionals and breaks tracing.""" )
def lowerCamelCase__ ( self :Union[str, Any] ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self :int ):
'''simple docstring'''
pass
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
TOLERANCE = 1e-4
@slow
@require_tf
class TFLEDModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384").led

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, 768)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3)

    def test_inference_with_head(self):
        model = TFLEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")

        # change to intended input here
        input_ids = _long_tensor([512 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        decoder_input_ids = _long_tensor([128 * [0, 31414, 232, 328, 740, 1140, 12695, 69]])
        inputs_dict = prepare_led_inputs_dict(model.config, input_ids, decoder_input_ids)
        output = model(**inputs_dict)[0]
        expected_shape = (1, 1024, model.config.vocab_size)
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = tf.convert_to_tensor(
            [[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]],
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-3, rtol=1e-3)
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
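def _make_divisible_sketch():
    # Worked example (illustrative, added for this edit): with width_multiplier=0.25
    # the tester's last_hidden_size above is make_divisible(512 * 0.25, divisor=8),
    # i.e. 128, which is already a multiple of 8 and is returned unchanged.
    return make_divisible(512 * 0.25, divisor=8)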
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
class PrefixSum:
    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
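if __name__ == "__main__":
    # Usage sketch (illustrative, added for this edit): O(1) range-sum queries after
    # O(n) preprocessing, plus the subarray-sum membership check.
    ps = PrefixSum([1, 2, 3, 4])
    assert ps.get_sum(1, 3) == 9  # 2 + 3 + 4
    assert ps.contains_sum(5)  # the slice [2, 3] sums to 5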
'''simple docstring'''
from __future__ import annotations
from typing import Generic, TypeVar
T = TypeVar("T")


class DisjointSetTreeNode(Generic[T]):
    def __init__(self, data: T) -> None:
        self.data = data
        self.parent = self
        self.rank = 0


class DisjointSetTree(Generic[T]):
    def __init__(self) -> None:
        # map from node name to the node object
        self.map: dict[T, DisjointSetTreeNode[T]] = {}

    def make_set(self, data: T) -> None:
        # create a new set with x as its member
        self.map[data] = DisjointSetTreeNode(data)

    def find_set(self, data: T) -> DisjointSetTreeNode[T]:
        # find the set x belongs to (with path-compression)
        elem_ref = self.map[data]
        if elem_ref != elem_ref.parent:
            elem_ref.parent = self.find_set(elem_ref.parent.data)
        return elem_ref.parent

    def link(self, node1: DisjointSetTreeNode[T], node2: DisjointSetTreeNode[T]) -> None:
        # helper function for union operation
        if node1.rank > node2.rank:
            node2.parent = node1
        else:
            node1.parent = node2
            if node1.rank == node2.rank:
                node2.rank += 1

    def union(self, data1: T, data2: T) -> None:
        # merge 2 disjoint sets
        self.link(self.find_set(data1), self.find_set(data2))


class GraphUndirectedWeighted(Generic[T]):
    def __init__(self) -> None:
        # connections: map from the node to the neighbouring nodes (with weights)
        self.connections: dict[T, dict[T, int]] = {}

    def add_node(self, node: T) -> None:
        # add a node ONLY if its not present in the graph
        if node not in self.connections:
            self.connections[node] = {}

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        # add an edge with the given weight
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight

    def kruskal(self) -> "GraphUndirectedWeighted[T]":
        # collect each undirected edge exactly once
        edges = []
        seen = set()
        for start in self.connections:
            for end in self.connections[start]:
                if (start, end) not in seen:
                    seen.add((end, start))
                    edges.append((start, end, self.connections[start][end]))
        edges.sort(key=lambda x: x[2])

        # creating the disjoint set
        disjoint_set = DisjointSetTree[T]()
        for node in self.connections:
            disjoint_set.make_set(node)

        # MST generation
        num_edges = 0
        index = 0
        graph = GraphUndirectedWeighted[T]()
        while num_edges < len(self.connections) - 1:
            u, v, w = edges[index]
            index += 1
            parent_u = disjoint_set.find_set(u)
            parent_v = disjoint_set.find_set(v)
            if parent_u != parent_v:
                num_edges += 1
                graph.add_edge(u, v, w)
                disjoint_set.union(u, v)
        return graph
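if __name__ == "__main__":
    # Usage sketch (illustrative, added for this edit): the MST of a weighted
    # triangle keeps the two cheapest edges and drops the heaviest one.
    g = GraphUndirectedWeighted[int]()
    g.add_edge(1, 2, 1)
    g.add_edge(2, 3, 2)
    g.add_edge(1, 3, 3)
    mst = g.kruskal()
    print(mst.connections)  # edges (1, 2) and (2, 3) survive; (1, 3) is dropped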
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator returning how long `func` took to run, in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                example[k] = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    example[k] = "The small grey turtle was surprisingly fast when challenged."
                else:
                    example[k] = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                example[k] = np.random.rand(*shape).astype(v.dtype)
        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
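if __name__ == "__main__":
    # Usage sketch (illustrative, added for this edit; the output path is an
    # arbitrary assumption): time how long writing a tiny all-string dataset takes.
    features = datasets.Features({"text": datasets.Value("string")})
    timed_write = get_duration(generate_example_dataset)
    seconds = timed_write("./_tmp_benchmark_dataset.arrow", features, num_examples=10)
    print(f"wrote 10 examples in {seconds:.4f}s")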
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
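
def _demo_dutch_flag_sort():
    # Illustrative check (added for this edit, not part of the original module):
    # one pass of the three-way partition groups the colors in flag order.
    assert dutch_national_flag_sort([2, 0, 1, 2, 0, 1]) == [0, 0, 1, 1, 2, 2]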
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
print(f"""{dutch_national_flag_sort(unsorted)}""")
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ) -> None:
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: Optional[int] = None
        self.timesteps: Optional[torch.Tensor] = None
        self.schedule: Optional[torch.Tensor] = None  # sigma(t_i)

    def scale_model_input(self, sample, timestep=None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps, device=None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample, sigma, generator=None) -> Tuple[torch.FloatTensor, float]:
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True
    ) -> Union[KarrasVeOutput, Tuple]:
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
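
def _karras_ve_sampling_sketch(denoiser, num_inference_steps=50):
    # Usage sketch (illustrative, added for this edit): `denoiser` is a hypothetical
    # stand-in for a real model, and the tensor shape and plain call convention are
    # assumptions. It wires together the methods above in the order the sampler
    # expects (churn, then an Euler step); `step_correct` is omitted for brevity.
    scheduler = KarrasVeScheduler()
    scheduler.set_timesteps(num_inference_steps)
    sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
        model_output = denoiser(sample_hat, sigma_hat)
        sample = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat).prev_sample
    return sample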
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub so this module still imports when vision deps are missing."""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_torch
class ZeroShotObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        examples = [
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "candidate_labels": ["cat", "remote", "couch"],
            }
        ]
        return object_detector, examples

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector(examples[0], threshold=0.0)

        n = len(outputs)
        self.assertGreater(n, 0)
        self.assertEqual(
            outputs,
            [
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                }
                for i in range(n)
            ],
        )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        object_detector = pipeline(
            "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
        )

        outputs = object_detector(
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            candidate_labels=["cat", "remote", "couch"],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                    "candidate_labels": ["cat", "remote", "couch"],
                }
            ],
            threshold=0.64,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.72_35, '''label''': '''cat''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.72_18, '''label''': '''remote''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.71_84, '''label''': '''couch''', '''box''': {'''xmin''': 2_0_4, '''ymin''': 1_6_7, '''xmax''': 2_3_2, '''ymax''': 1_9_0}},
{'''score''': 0.67_48, '''label''': '''remote''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_56, '''label''': '''cat''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.66_14, '''label''': '''couch''', '''box''': {'''xmin''': 5_7_1, '''ymin''': 8_3, '''xmax''': 5_9_8, '''ymax''': 1_0_3}},
{'''score''': 0.64_56, '''label''': '''remote''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
{'''score''': 0.6_42, '''label''': '''remote''', '''box''': {'''xmin''': 6_7, '''ymin''': 2_7_4, '''xmax''': 9_3, '''ymax''': 2_9_7}},
{'''score''': 0.64_19, '''label''': '''cat''', '''box''': {'''xmin''': 4_9_4, '''ymin''': 1_0_5, '''xmax''': 5_2_1, '''ymax''': 1_2_7}},
]
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
] , )
        outputs = object_detector(
            [
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
                {
                    "image": "http://images.cocodataset.org/val2017/000000039769.jpg",
                    "candidate_labels": ["cat", "remote", "couch"],
                },
            ],
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
[
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
{'''score''': 0.14_74, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_5, '''ymin''': 7_4, '''xmax''': 3_7_1, '''ymax''': 1_8_7}},
{'''score''': 0.12_08, '''label''': '''couch''', '''box''': {'''xmin''': 4, '''ymin''': 0, '''xmax''': 6_4_2, '''ymax''': 4_7_6}},
],
] , )
    @require_tf
    @unittest.skip("Zero Shot Object Detection not implemented in TF")
    def test_large_model_tf(self):
        pass
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            threshold=threshold,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
{'''score''': 0.25_37, '''label''': '''cat''', '''box''': {'''xmin''': 1, '''ymin''': 5_5, '''xmax''': 3_1_5, '''ymax''': 4_7_2}},
] , )
    @require_torch
    @slow
    def test_top_k(self):
        top_k = 2
        object_detector = pipeline("zero-shot-object-detection")

        outputs = object_detector(
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            candidate_labels=["cat", "remote", "couch"],
            top_k=top_k,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.28_68, '''label''': '''cat''', '''box''': {'''xmin''': 3_2_4, '''ymin''': 2_0, '''xmax''': 6_4_0, '''ymax''': 3_7_3}},
{'''score''': 0.2_77, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_2, '''xmax''': 1_7_7, '''ymax''': 1_1_5}},
] , )
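
def _zero_shot_detection_quickstart():
    # Usage sketch (illustrative, added for this edit): the same call pattern the
    # tests above exercise, against the tiny test checkpoint so it stays CPU-cheap.
    detector = pipeline(
        "zero-shot-object-detection", model="hf-internal-testing/tiny-random-owlvit-object-detection"
    )
    return detector(
        "./tests/fixtures/tests_samples/COCO/000000039769.png",
        candidate_labels=["cat", "remote", "couch"],
        threshold=0.0,
    )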
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDPMScheduler = 1
    FlaxDDIMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Dict[str, Any] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )
def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
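
def _broadcast_sketch():
    # Usage sketch (illustrative, added for this edit): per-timestep scalars are
    # reshaped so they broadcast against a batch of samples from the left,
    # e.g. shape (4,) -> (4, 1, 1, 1) -> (4, 3, 8, 8).
    coeffs = jnp.ones((4,))
    samples_shape = (4, 3, 8, 8)
    return broadcast_to_shape_from_left(coeffs, samples_shape).shape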
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
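
if __name__ == "__main__":
    # Usage sketch (illustrative, added for this edit): head_dim is derived from
    # hidden_size and num_attention_heads, and rotary embeddings are implied
    # whenever alibi is disabled.
    config = FalconConfig()
    print(config.head_dim)  # 4544 // 71 == 64
    print(config.rotary)    # True, since alibi defaults to False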
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
)
@flax.struct.dataclass
class FlaxControlNetOutput(BaseOutput):
    down_block_res_samples: jnp.ndarray
    mid_block_res_sample: jnp.ndarray
class FlaxControlNetConditioningEmbedding(nn.Module):
    conditioning_embedding_channels: int
    block_out_channels: Tuple[int] = (16, 32, 96, 256)
    dtype: jnp.dtype = jnp.float32
    def setup(self):
        self.conv_in = nn.Conv(self.block_out_channels[0], kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)

        blocks = []
        for i in range(len(self.block_out_channels) - 1):
            channel_in = self.block_out_channels[i]
            channel_out = self.block_out_channels[i + 1]
            conv1 = nn.Conv(channel_in, kernel_size=(3, 3), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv1)
            conv2 = nn.Conv(channel_out, kernel_size=(3, 3), strides=(2, 2), padding=((1, 1), (1, 1)), dtype=self.dtype)
            blocks.append(conv2)
        self.blocks = blocks

        self.conv_out = nn.Conv(self.conditioning_embedding_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)), kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)

    def __call__(self, conditioning):
        embedding = self.conv_in(conditioning)
        embedding = nn.silu(embedding)

        for block in self.blocks:
            embedding = block(embedding)
            embedding = nn.silu(embedding)

        embedding = self.conv_out(embedding)
        return embedding
@flax_register_to_config
class FlaxControlNetModel(nn.Module, FlaxModelMixin, ConfigMixin):
    sample_size: int = 32
    in_channels: int = 4
    down_block_types: Tuple[str] = (
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "CrossAttnDownBlock2D",
        "DownBlock2D",
    )
    only_cross_attention: Union[bool, Tuple[bool]] = False
    block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
    layers_per_block: int = 2
    attention_head_dim: Union[int, Tuple[int]] = 8
    num_attention_heads: Optional[Union[int, Tuple[int]]] = None
    cross_attention_dim: int = 1280
    dropout: float = 0.0
    use_linear_projection: bool = False
    dtype: jnp.dtype = jnp.float32
    flip_sin_to_cos: bool = True
    freq_shift: int = 0
    controlnet_conditioning_channel_order: str = "rgb"
    conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256)
    def init_weights(self, rng: jax.Array) -> FrozenDict:
        # init input tensors
        sample_shape = (1, self.in_channels, self.sample_size, self.sample_size)
        sample = jnp.zeros(sample_shape, dtype=jnp.float32)
        timesteps = jnp.ones((1,), dtype=jnp.int32)
        encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32)
        controlnet_cond_shape = (1, 3, self.sample_size * 8, self.sample_size * 8)
        controlnet_cond = jnp.zeros(controlnet_cond_shape, dtype=jnp.float32)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        return self.init(rngs, sample, timesteps, encoder_hidden_states, controlnet_cond)["params"]
    def setup(self):
        block_out_channels = self.block_out_channels
        time_embed_dim = block_out_channels[0] * 4

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = self.num_attention_heads or self.attention_head_dim

        # input
        self.conv_in = nn.Conv(block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype)

        # time
        self.time_proj = FlaxTimesteps(block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift)
        self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)

        self.controlnet_cond_embedding = FlaxControlNetConditioningEmbedding(conditioning_embedding_channels=block_out_channels[0], block_out_channels=self.conditioning_embedding_out_channels)

        only_cross_attention = self.only_cross_attention
        if isinstance(only_cross_attention, bool):
            only_cross_attention = (only_cross_attention,) * len(self.down_block_types)

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(self.down_block_types)

        # down
        down_blocks = []
        controlnet_down_blocks = []

        output_channel = block_out_channels[0]
        controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
        controlnet_down_blocks.append(controlnet_block)

        for i, down_block_type in enumerate(self.down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            if down_block_type == "CrossAttnDownBlock2D":
                down_block = FlaxCrossAttnDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, num_attention_heads=num_attention_heads[i], add_downsample=not is_final_block, use_linear_projection=self.use_linear_projection, only_cross_attention=only_cross_attention[i], dtype=self.dtype)
            else:
                down_block = FlaxDownBlock2D(in_channels=input_channel, out_channels=output_channel, dropout=self.dropout, num_layers=self.layers_per_block, add_downsample=not is_final_block, dtype=self.dtype)

            down_blocks.append(down_block)

            for _ in range(self.layers_per_block):
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

            if not is_final_block:
                controlnet_block = nn.Conv(output_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
                controlnet_down_blocks.append(controlnet_block)

        self.down_blocks = down_blocks
        self.controlnet_down_blocks = controlnet_down_blocks

        # mid
        mid_block_channel = block_out_channels[-1]
        self.mid_block = FlaxUNetMidBlock2DCrossAttn(in_channels=mid_block_channel, dropout=self.dropout, num_attention_heads=num_attention_heads[-1], use_linear_projection=self.use_linear_projection, dtype=self.dtype)

        self.controlnet_mid_block = nn.Conv(mid_block_channel, kernel_size=(1, 1), padding="VALID", kernel_init=nn.initializers.zeros_init(), bias_init=nn.initializers.zeros_init(), dtype=self.dtype)
    def __call__(self, sample, timesteps, encoder_hidden_states, controlnet_cond, conditioning_scale=1.0, return_dict=True, train=False):
        channel_order = self.controlnet_conditioning_channel_order
        if channel_order == "bgr":
            controlnet_cond = jnp.flip(controlnet_cond, axis=1)

        # 1. time
        if not isinstance(timesteps, jnp.ndarray):
            timesteps = jnp.array([timesteps], dtype=jnp.int32)
        elif isinstance(timesteps, jnp.ndarray) and len(timesteps.shape) == 0:
            timesteps = timesteps.astype(dtype=jnp.float32)
            timesteps = jnp.expand_dims(timesteps, 0)

        t_emb = self.time_proj(timesteps)
        t_emb = self.time_embedding(t_emb)

        # 2. pre-process
        sample = jnp.transpose(sample, (0, 2, 3, 1))
        sample = self.conv_in(sample)

        controlnet_cond = jnp.transpose(controlnet_cond, (0, 2, 3, 1))
        controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)
        sample += controlnet_cond

        # 3. down
        down_block_res_samples = (sample,)
        for down_block in self.down_blocks:
            if isinstance(down_block, FlaxCrossAttnDownBlock2D):
                sample, res_samples = down_block(sample, t_emb, encoder_hidden_states, deterministic=not train)
            else:
                sample, res_samples = down_block(sample, t_emb, deterministic=not train)
            down_block_res_samples += res_samples

        # 4. mid
        sample = self.mid_block(sample, t_emb, encoder_hidden_states, deterministic=not train)

        # 5. contronet blocks
        controlnet_down_block_res_samples = ()
        for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):
            down_block_res_sample = controlnet_block(down_block_res_sample)
            controlnet_down_block_res_samples += (down_block_res_sample,)

        down_block_res_samples = controlnet_down_block_res_samples

        mid_block_res_sample = self.controlnet_mid_block(sample)

        # 6. scaling
        down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]
        mid_block_res_sample *= conditioning_scale

        if not return_dict:
            return (down_block_res_samples, mid_block_res_sample)

        return FlaxControlNetOutput(
            down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample
        )
'''simple docstring'''
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print("       ...       ....        nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(r"| /\ | |- |  |-  |--| |\  /| |-")
    print(r"|/ \| |- |_ |_  |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))

    print("Good Bye...")
'''simple docstring'''
from math import sqrt


def sum_of_divisors(n: int) -> int:
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
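
def _demo_amicable_pair():
    # Worked example (illustrative, added for this edit): 220 and 284 form the
    # smallest amicable pair, the property solution() scans for below its limit.
    assert sum_of_divisors(220) == 284
    assert sum_of_divisors(284) == 220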
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new tokenizer via the Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
def count_inversions_bf(arr):
    """Count inversions with a brute-force double loop, O(n^2)."""
    num_inversions = 0
    n = len(arr)
    for i in range(n - 1):
        for j in range(i + 1, n):
            if arr[i] > arr[j]:
                num_inversions += 1
    return num_inversions


def count_inversions_recursive(arr):
    """Count inversions via divide and conquer, O(n log n); returns (sorted_arr, count)."""
    if len(arr) <= 1:
        return arr, 0
    mid = len(arr) // 2
    p = arr[0:mid]
    q = arr[mid:]

    a, inversion_p = count_inversions_recursive(p)
    b, inversions_q = count_inversions_recursive(q)
    c, cross_inversions = _count_cross_inversions(a, b)

    num_inversions = inversion_p + inversions_q + cross_inversions
    return c, num_inversions


def _count_cross_inversions(p, q):
    """Merge two sorted lists while counting inversions that cross the split point."""
    r = []
    i = j = num_inversion = 0
    while i < len(p) and j < len(q):
        if p[i] > q[j]:
            # if P[i] > Q[j], then P[k] > Q[j] for all i < k <= len(P)
            # These are all inversions. The claim emerges from the
            # property that P is sorted.
            num_inversion += len(p) - i
            r.append(q[j])
            j += 1
        else:
            r.append(p[i])
            i += 1

    if i < len(p):
        r.extend(p[i:])
    else:
        r.extend(q[j:])

    return r, num_inversion


def main():
    arr_1 = [10, 2, 1, 5, 5, 2, 11]
    # this arr has 8 inversions:
    # (10, 2), (10, 1), (10, 5), (10, 5), (10, 2), (2, 1), (5, 2), (5, 2)
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 8
    print("number of inversions = ", num_inversions_bf)

    # testing an array with zero inversion (a sorted arr_1)
    arr_1.sort()
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)

    # an empty list should also have zero inversions
    arr_1 = []
    num_inversions_bf = count_inversions_bf(arr_1)
    _, num_inversions_recursive = count_inversions_recursive(arr_1)
    assert num_inversions_bf == num_inversions_recursive == 0
    print("number of inversions = ", num_inversions_bf)


if __name__ == "__main__":
    main()
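
def _inversions_sketch():
    # Usage sketch (illustrative, added for this edit): both counters agree on
    # [3, 1, 2], whose two inversions are (3, 1) and (3, 2); the recursive version
    # does the same count in O(n log n) instead of O(n^2).
    arr = [3, 1, 2]
    assert count_inversions_bf(arr) == count_inversions_recursive(arr)[1] == 2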
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch ):
    '''Pad text to the batch max length and stack the image/label tensors.'''
    lengths = [len(row["sentence"] ) for row in batch]
    bsz, max_seq_len = len(batch ), max(lengths )
    mask_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    text_tensor = torch.zeros(bsz , max_seq_len , dtype=torch.long )
    for i_batch, (input_row, length) in enumerate(zip(batch , lengths ) ):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch] )
    tgt_tensor = torch.stack([row["label"] for row in batch] )
    img_start_token = torch.stack([row["image_start_token"] for row in batch] )
    img_end_token = torch.stack([row["image_end_token"] for row in batch] )
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ),
] )
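# Hedged wiring sketch for the pieces above. The dataset class name
# JsonlDataset is an assumption about the corpus class above, and the jsonl
# path and checkpoint name are placeholders; the helper names match the
# definitions in this file:
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   dataset = JsonlDataset(
#       "mmimdb/train.jsonl", tokenizer, get_image_transforms(),
#       get_mmimdb_labels(), max_seq_length=512,
#   )
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#   text, mask, image, img_start, img_end, labels = next(iter(loader))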
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a__ : Tuple = logging.get_logger(__name__)
a__ : List[Any] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class UpperCamelCase_ ( PretrainedConfig):
"""simple docstring"""
snake_case__ : Any = "efficientformer"
def __init__( self : Any , UpperCAmelCase__ : List[int] = [3, 2, 6, 4] , UpperCAmelCase__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , UpperCAmelCase__ : List[bool] = [True, True, True, True] , UpperCAmelCase__ : int = 4_4_8 , UpperCAmelCase__ : int = 3_2 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : int = 7 , UpperCAmelCase__ : int = 5 , UpperCAmelCase__ : int = 8 , UpperCAmelCase__ : int = 4 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 1_6 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 3 , UpperCAmelCase__ : int = 2 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : float = 0.0 , UpperCAmelCase__ : int = 1 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : float = 1E-5 , UpperCAmelCase__ : str = "gelu" , UpperCAmelCase__ : float = 0.02 , UpperCAmelCase__ : float = 1E-12 , UpperCAmelCase__ : int = 2_2_4 , UpperCAmelCase__ : float = 1E-05 , **UpperCAmelCase__ : Tuple , ) -> None:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = mlp_expansion_ratio
__SCREAMING_SNAKE_CASE = downsamples
__SCREAMING_SNAKE_CASE = dim
__SCREAMING_SNAKE_CASE = key_dim
__SCREAMING_SNAKE_CASE = attention_ratio
__SCREAMING_SNAKE_CASE = resolution
__SCREAMING_SNAKE_CASE = pool_size
__SCREAMING_SNAKE_CASE = downsample_patch_size
__SCREAMING_SNAKE_CASE = downsample_stride
__SCREAMING_SNAKE_CASE = downsample_pad
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = num_meta3d_blocks
__SCREAMING_SNAKE_CASE = distillation
__SCREAMING_SNAKE_CASE = use_layer_scale
__SCREAMING_SNAKE_CASE = layer_scale_init_value
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = batch_norm_eps
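# A minimal usage sketch, assuming the class above maps to
# EfficientFormerConfig; the field values echo the defaults in __init__:
#
#   config = EfficientFormerConfig(depths=[3, 2, 6, 4],
#                                  hidden_sizes=[48, 96, 224, 448])
#   assert config.model_type == "efficientformer"
#   assert config.hidden_act == "gelu"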
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
_a : List[str]= BertJapaneseTokenizer
_a : Optional[Any]= False
_a : List[Any]= True
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
lowercase : List[Any] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""こんにちは""",
"""こん""",
"""にちは""",
"""ばんは""",
"""##こん""",
"""##にちは""",
"""##ばんは""",
"""世界""",
"""##世界""",
"""、""",
"""##、""",
"""。""",
"""##。""",
]
lowercase : Union[str, Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : int = """こんにちは、世界。 \nこんばんは、世界。"""
lowercase : Tuple = """こんにちは 、 世界 。 こんばんは 、 世界 。"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : int = self.get_input_output_texts(a__ )
lowercase : List[Any] = tokenizer.encode(a__ ,add_special_tokens=a__ )
lowercase : Optional[int] = tokenizer.decode(a__ ,clean_up_tokenization_spaces=a__ )
return text, ids
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.tokenizer_class(self.vocab_file )
lowercase : str = tokenizer.tokenize("""こんにちは、世界。\nこんばんは、世界。""" )
self.assertListEqual(a__ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""mecab""" )
self.assertIsNotNone(a__ )
lowercase : str = """こんにちは、世界。\nこんばんは、世界。"""
lowercase : int = tokenizer.tokenize(a__ )
self.assertListEqual(a__ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Dict = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(a__ ,"""wb""" ) as handle:
pickle.dump(a__ ,a__ )
with open(a__ ,"""rb""" ) as handle:
lowercase : List[Any] = pickle.load(a__ )
lowercase : List[Any] = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ ,a__ )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = MecabTokenizer(mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
try:
lowercase : List[Any] = MecabTokenizer(mecab_dic="""unidic_lite""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
try:
lowercase : str = MecabTokenizer(mecab_dic="""unidic""" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = MecabTokenizer(do_lower_case=a__ ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iphone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
try:
lowercase : Any = MecabTokenizer(
do_lower_case=a__ ,normalize_text=a__ ,mecab_option="""-d /usr/local/lib/mecab/dic/jumandic""" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = MecabTokenizer(normalize_text=a__ ,mecab_dic="""ipadic""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップルストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """ """, """。"""] ,)
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""sudachi""" )
self.assertIsNotNone(a__ )
lowercase : List[str] = """こんにちは、世界。\nこんばんは、世界。"""
lowercase : int = tokenizer.tokenize(a__ )
self.assertListEqual(a__ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Tuple = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(a__ ,"""wb""" ) as handle:
pickle.dump(a__ ,a__ )
with open(a__ ,"""rb""" ) as handle:
lowercase : Union[str, Any] = pickle.load(a__ )
lowercase : Tuple = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ ,a__ )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = SudachiTokenizer(sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Tuple = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""A""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国""", """人""", """参政""", """権"""] )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""B""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人""", """参政権"""] )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = SudachiTokenizer(sudachi_dict_type="""core""" ,sudachi_split_mode="""C""" )
self.assertListEqual(tokenizer.tokenize("""外国人参政権""" ) ,["""外国人参政権"""] )
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = SudachiTokenizer(do_lower_case=a__ ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iphone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """ """, """。""", """ """, """ """] ,)
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Dict = SudachiTokenizer(normalize_text=a__ ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,[""" """, """\t""", """アップル""", """ストア""", """で""", """iPhone""", """8""", """ """, """が""", """ """, """ """, """\n """, """発売""", """さ""", """れ""", """た""", """\u3000""", """。""", """ """, """ """] ,)
@require_sudachi
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = SudachiTokenizer(trim_whitespace=a__ ,sudachi_dict_type="""core""" )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れ""", """た""", """。"""] ,)
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = self.tokenizer_class(self.vocab_file ,word_tokenizer_type="""jumanpp""" )
self.assertIsNotNone(a__ )
lowercase : Any = """こんにちは、世界。\nこんばんは、世界。"""
lowercase : Union[str, Any] = tokenizer.tokenize(a__ )
self.assertListEqual(a__ ,["""こんにちは""", """、""", """世界""", """。""", """こん""", """##ばんは""", """、""", """世界""", """。"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) ,[3, 12, 10, 14, 4, 9, 12, 10, 14] )
lowercase : Optional[int] = os.path.join(self.tmpdirname ,"""tokenizer.bin""" )
with open(a__ ,"""wb""" ) as handle:
pickle.dump(a__ ,a__ )
with open(a__ ,"""rb""" ) as handle:
lowercase : List[Any] = pickle.load(a__ )
lowercase : str = tokenizer_new.tokenize(a__ )
self.assertListEqual(a__ ,a__ )
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = JumanppTokenizer(do_lower_case=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iphone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = JumanppTokenizer(normalize_text=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""ア""", """ッ""", """フ""", """゚""", """ル""", """ストア""", """で""", """iPhone""", """8""", """\u3000""", """が""", """\u3000""", """\u3000""", """\u3000""", """発売""", """さ""", """れた""", """\u3000""", """。"""] ,)
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Union[str, Any] = JumanppTokenizer(trim_whitespace=a__ )
self.assertListEqual(
tokenizer.tokenize(""" \tアップルストアでiPhone8 が \n 発売された 。 """ ) ,["""アップル""", """ストア""", """で""", """iPhone""", """8""", """が""", """発売""", """さ""", """れた""", """。"""] ,)
@require_jumanpp
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("""ありがとうございますm(_ _)m見つけるのが大変です。""" ) ,["""ありがとう""", """ございます""", """m(_ _)m""", """見つける""", """の""", """が""", """大変です""", """。"""] ,)
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = ["""[UNK]""", """[CLS]""", """[SEP]""", """こんにちは""", """こん""", """にちは""", """ばんは""", """##こん""", """##にちは""", """##ばんは"""]
lowercase : Union[str, Any] = {}
for i, token in enumerate(a__ ):
lowercase : List[str] = i
lowercase : Optional[int] = WordpieceTokenizer(vocab=a__ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こんにちは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは""" ) ,["""こん""", """##ばんは"""] )
self.assertListEqual(tokenizer.tokenize("""こんばんは こんばんにちは こんにちは""" ) ,["""こん""", """##ばんは""", """[UNK]""", """こんにちは"""] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Any = BertJapaneseTokenizer.from_pretrained("""nlp-waseda/roberta-base-japanese-with-auto-jumanpp""" )
lowercase : Union[str, Any] = tokenizer.subword_tokenizer
lowercase : Any = subword_tokenizer.tokenize("""国境 の 長い トンネル を 抜ける と 雪国 であった 。""" )
self.assertListEqual(a__ ,["""▁国境""", """▁の""", """▁長い""", """▁トンネル""", """▁を""", """▁抜ける""", """▁と""", """▁雪""", """国""", """▁であった""", """▁。"""] )
lowercase : Optional[int] = subword_tokenizer.tokenize("""こんばんは こんばん にち は こんにちは""" )
self.assertListEqual(a__ ,["""▁こん""", """ばん""", """は""", """▁こん""", """ばん""", """▁に""", """ち""", """▁は""", """▁こんにちは"""] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[int] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese""" )
lowercase : Union[str, Any] = tokenizer.encode("""ありがとう。""" ,add_special_tokens=a__ )
lowercase : Optional[Any] = tokenizer.encode("""どういたしまして。""" ,add_special_tokens=a__ )
lowercase : List[str] = tokenizer.build_inputs_with_special_tokens(a__ )
lowercase : Any = tokenizer.build_inputs_with_special_tokens(a__ ,a__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __snake_case ( TokenizerTesterMixin , unittest.TestCase ):
_a : Tuple= BertJapaneseTokenizer
_a : Optional[Any]= False
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
super().setUp()
lowercase : Dict = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
lowercase : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self ,**snake_case ):
'''simple docstring'''
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname ,subword_tokenizer_type="""character""" ,**a__ )
def _SCREAMING_SNAKE_CASE ( self ,snake_case ):
'''simple docstring'''
lowercase : Dict = """こんにちは、世界。 \nこんばんは、世界。"""
lowercase : Optional[Any] = """こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"""
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
pass # TODO add if relevant
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : int = self.tokenizer_class(self.vocab_file ,subword_tokenizer_type="""character""" )
lowercase : Union[str, Any] = tokenizer.tokenize("""こんにちは、世界。 \nこんばんは、世界。""" )
self.assertListEqual(
a__ ,["""こ""", """ん""", """に""", """ち""", """は""", """、""", """世""", """界""", """。""", """こ""", """ん""", """ば""", """ん""", """は""", """、""", """世""", """界""", """。"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) ,[3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : Optional[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """こ""", """ん""", """に""", """ち""", """は""", """ば""", """世""", """界""", """、""", """。"""]
lowercase : List[Any] = {}
for i, token in enumerate(a__ ):
lowercase : int = i
lowercase : str = CharacterTokenizer(vocab=a__ ,unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) ,[] )
self.assertListEqual(tokenizer.tokenize("""こんにちは""" ) ,["""こ""", """ん""", """に""", """ち""", """は"""] )
self.assertListEqual(tokenizer.tokenize("""こんにちほ""" ) ,["""こ""", """ん""", """に""", """ち""", """[UNK]"""] )
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : List[str] = self.tokenizer_class.from_pretrained("""cl-tohoku/bert-base-japanese-char""" )
lowercase : List[Any] = tokenizer.encode("""ありがとう。""" ,add_special_tokens=a__ )
lowercase : Union[str, Any] = tokenizer.encode("""どういたしまして。""" ,add_special_tokens=a__ )
lowercase : Optional[int] = tokenizer.build_inputs_with_special_tokens(a__ )
lowercase : int = tokenizer.build_inputs_with_special_tokens(a__ ,a__ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = """cl-tohoku/bert-base-japanese"""
lowercase : Optional[int] = AutoTokenizer.from_pretrained(a__ )
self.assertIsInstance(a__ ,a__ )
class __snake_case ( unittest.TestCase ):
def _SCREAMING_SNAKE_CASE ( self ):
'''simple docstring'''
lowercase : str = """cl-tohoku/bert-base-japanese"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertTokenizer.from_pretrained(a__ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
lowercase : Dict = """bert-base-cased"""
with self.assertLogs("""transformers""" ,level="""WARNING""" ) as cm:
BertJapaneseTokenizer.from_pretrained(a__ )
self.assertTrue(
cm.records[0].message.startswith(
"""The tokenizer class you load from this checkpoint is not the same type as the class this function"""
""" is called from.""" ) )
"""simple docstring"""
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_a : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class __A ( TokenizerTesterMixin , unittest.TestCase ):
_UpperCamelCase : List[Any] = XGLMTokenizer
_UpperCamelCase : List[Any] = XGLMTokenizerFast
_UpperCamelCase : Dict = True
_UpperCamelCase : Tuple = True
def __A ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
tokenizer = XGLMTokenizer(_a , keep_accents=True )
tokenizer.save_pretrained(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[str] = """<pad>"""
_lowerCAmelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(a__ ) , a__ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(a__ ) , a__ )
def __A ( self ):
_lowerCAmelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(len(a__ ) , 1008 )
def __A ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1008 )
def __A ( self ):
tokenizer = XGLMTokenizer(_a , keep_accents=True )
_lowerCAmelCase : Dict = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(a__ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(a__ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
_lowerCAmelCase : Any = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] , )
_lowerCAmelCase : List[str] = tokenizer.convert_tokens_to_ids(a__ )
self.assertListEqual(
a__ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
_lowerCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(a__ )
self.assertListEqual(
a__ , [
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] , )
@cached_property
def __A ( self ):
return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" )
def __A ( self ):
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(_a , f.name )
tokenizer = XGLMTokenizer(f.name , keep_accents=True )
pickled_tokenizer = pickle.dumps(tokenizer )
pickle.loads(pickled_tokenizer )
def __A ( self ):
if not self.test_rust_tokenizer:
return
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = self.get_rust_tokenizer()
_lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé."""
_lowerCAmelCase : List[Any] = tokenizer.tokenize(a__ )
_lowerCAmelCase : Tuple = rust_tokenizer.tokenize(a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : Union[str, Any] = tokenizer.encode(a__ , add_special_tokens=a__ )
_lowerCAmelCase : str = rust_tokenizer.encode(a__ , add_special_tokens=a__ )
self.assertListEqual(a__ , a__ )
_lowerCAmelCase : int = self.get_rust_tokenizer()
_lowerCAmelCase : Dict = tokenizer.encode(a__ )
_lowerCAmelCase : List[Any] = rust_tokenizer.encode(a__ )
self.assertListEqual(a__ , a__ )
@slow
def __A ( self ):
_lowerCAmelCase : int = """Hello World!"""
_lowerCAmelCase : Optional[int] = [2, 31227, 4447, 35]
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
_lowerCAmelCase : Any = (
"""This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"""
""" add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"""
)
# fmt: off
_lowerCAmelCase : List[str] = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(a__ , self.big_tokenizer.encode(a__ ) )
@slow
def __A ( self ):
# fmt: off
_lowerCAmelCase : List[str] = {
"""input_ids""": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=a__ , model_name="""facebook/xglm-564M""" , padding=a__ , )
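# Note on `fairseq_offset` in the assertions above: XGLM shifts raw
# SentencePiece ids by a constant to make room for special tokens, so an
# expected id is written as `raw_sp_id + tokenizer.fairseq_offset`. Sketch:
#
#   raw_sp_id = 285                               # id in the SentencePiece model
#   hf_id = raw_sp_id + tokenizer.fairseq_offset  # id the tokenizer returns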
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main():
    """Entry point for the diffusers-cli tool."""
    parser = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
    commands_parser = parser.add_subparsers(help='''diffusers-cli command helpers''' )
    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
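# Invocation sketch, assuming the usual console-script entry point is wired
# to main() above:
#
#   $ diffusers-cli env    # prints environment info for bug reports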
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error (MSE) is the average of the squared differences between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] ,)
def _get_feature_types( self ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
    mse = mean_squared_error(
        predictions , references , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
return {"mse": mse}
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def __snake_case ( _UpperCAmelCase ):
__a = []
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for v in tree.values():
shapes.extend(_fetch_dims(_UpperCAmelCase ) )
elif isinstance(_UpperCAmelCase , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_UpperCAmelCase ) )
elif isinstance(_UpperCAmelCase , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase ):
__a = []
for d in reversed(_UpperCAmelCase ):
idx.append(flat_idx % d )
__a = flat_idx // d
return tuple(reversed(_UpperCAmelCase ) )
@torch.jit.ignore
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = None , _UpperCAmelCase = None , ):
# start_edges and end_edges both indicate whether, starting from any given
# dimension, the start/end index is at the top/bottom edge of the
# corresponding tensor, modeled as a tree
def reduce_edge_list(_UpperCAmelCase ) -> None:
__a = True
for i in range(len(_UpperCAmelCase ) ):
__a = -1 * (i + 1)
l[reversed_idx] &= tally
__a = l[reversed_idx]
if start_edges is None:
__a = [s == 0 for s in start]
reduce_edge_list(_UpperCAmelCase )
if end_edges is None:
__a = [e == (d - 1) for e, d in zip(_UpperCAmelCase , _UpperCAmelCase )]
reduce_edge_list(_UpperCAmelCase )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_UpperCAmelCase ) == 0:
return [()]
elif len(_UpperCAmelCase ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
__a = []
__a = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_UpperCAmelCase , _UpperCAmelCase ):
if s == e:
path_list.append(slice(_UpperCAmelCase , s + 1 ) )
else:
break
__a = tuple(_UpperCAmelCase )
__a = len(_UpperCAmelCase )
# start == end, and we're done
if divergence_idx == len(_UpperCAmelCase ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__a = start[divergence_idx]
return tuple(
path + (slice(_UpperCAmelCase , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
__a = end[divergence_idx]
return tuple(
path + (slice(_UpperCAmelCase , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
__a = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a = t.shape[:no_batch_dims]
__a = list(_flat_idx_to_idx(_UpperCAmelCase , _UpperCAmelCase ) )
# _get_minimal_slice_set is inclusive
__a = list(_flat_idx_to_idx(flat_end - 1 , _UpperCAmelCase ) )
# Get an ordered list of slices to perform
__a = _get_minimal_slice_set(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , )
__a = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def __snake_case ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = False , _UpperCAmelCase = None , _UpperCAmelCase = False , ):
if not (len(_UpperCAmelCase ) > 0):
raise ValueError('''Must provide at least one input''' )
__a = [shape[:no_batch_dims] for shape in _fetch_dims(_UpperCAmelCase )]
__a = tuple([max(_UpperCAmelCase ) for s in zip(*_UpperCAmelCase )] )
def _prep_inputs(_UpperCAmelCase ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
__a = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
__a = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
__a = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
__a = tensor_tree_map(_prep_inputs , _UpperCAmelCase )
__a = None
if _out is not None:
__a = tensor_tree_map(lambda _UpperCAmelCase : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
__a = 1
for d in orig_batch_dims:
flat_batch_dim *= d
__a = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(_UpperCAmelCase ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
__a = 0
__a = prepped_outputs
for _ in range(_UpperCAmelCase ):
# Chunk the input
if not low_mem:
__a = _select_chunk
else:
__a = partial(
_chunk_slice , flat_start=_UpperCAmelCase , flat_end=min(_UpperCAmelCase , i + chunk_size ) , no_batch_dims=len(_UpperCAmelCase ) , )
__a = tensor_tree_map(_UpperCAmelCase , _UpperCAmelCase )
# Run the layer on the chunk
__a = layer(**_UpperCAmelCase )
# Allocate space for the output
if out is None:
__a = tensor_tree_map(lambda _UpperCAmelCase : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _UpperCAmelCase )
# Put the chunk in its pre-allocated space
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
def assign(_UpperCAmelCase , _UpperCAmelCase ) -> None:
for k, v in da.items():
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
assign(_UpperCAmelCase , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
__a = da[k]
assign(_UpperCAmelCase , _UpperCAmelCase )
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
for xa, xa in zip(_UpperCAmelCase , _UpperCAmelCase ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
__a = xa
elif isinstance(_UpperCAmelCase , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
__a = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
__a = tensor_tree_map(lambda _UpperCAmelCase : t.view(orig_batch_dims + t.shape[1:] ) , _UpperCAmelCase )
return out
class _A :
def __init__( self : List[str] , __SCREAMING_SNAKE_CASE : int = 512 , ):
'''simple docstring'''
__a = max_chunk_size
__a = None
__a = None
def _lowerCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : tuple , __SCREAMING_SNAKE_CASE : int):
'''simple docstring'''
logging.info('''Tuning chunk size...''')
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
__a = [2**l for l in range(int(math.log(self.max_chunk_size , 2)) + 1)]
__a = [c for c in candidates if c > min_chunk_size]
__a = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(__SCREAMING_SNAKE_CASE : int) -> bool:
try:
with torch.no_grad():
fn(*__SCREAMING_SNAKE_CASE , chunk_size=__SCREAMING_SNAKE_CASE)
return True
except RuntimeError:
return False
__a = 0
__a = len(__SCREAMING_SNAKE_CASE) - 1
while i > min_viable_chunk_size_index:
__a = test_chunk_size(candidates[i])
if not viable:
__a = (min_viable_chunk_size_index + i) // 2
else:
__a = i
__a = (i + len(__SCREAMING_SNAKE_CASE) - 1) // 2
return candidates[min_viable_chunk_size_index]
def _lowerCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Iterable , __SCREAMING_SNAKE_CASE : Iterable):
'''simple docstring'''
__a = True
for aa, aa in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
assert type(__SCREAMING_SNAKE_CASE) == type(__SCREAMING_SNAKE_CASE)
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple)):
consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__a = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
__a = [v for _, v in sorted(aa.items() , key=lambda x: x[0])]
consistent &= self._compare_arg_caches(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
else:
consistent &= aa == aa
return consistent
def _lowerCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : tuple , __SCREAMING_SNAKE_CASE : int , ):
'''simple docstring'''
__a = True
__a = tree_map(lambda a: a.shape if isinstance(a , torch.Tensor) else a , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data) == len(__SCREAMING_SNAKE_CASE)
__a = self._compare_arg_caches(self.cached_arg_data , __SCREAMING_SNAKE_CASE)
else:
# Otherwise, we can reuse the precomputed value
__a = False
if not consistent:
__a = self._determine_favorable_chunk_size(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
__a = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
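# Hedged usage sketch of the chunked-apply helper defined above (its public
# name in the OpenFold-derived sources is `chunk_layer`; the argument names
# here are assumptions consistent with the body):
#
#   out = chunk_layer(
#       layer=module.forward,
#       inputs={"x": activations},  # all inputs share the leading batch dims
#       chunk_size=256,
#       no_batch_dims=2,
#   )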
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    '''simple docstring'''

    def __init__( self , data ):
        self.data : Any = data
        self.next : Node | None = None


class CircularLinkedList:
    '''simple docstring'''

    def __init__( self ):
        self.head : Node | None = None
        self.tail : Node | None = None

    def __iter__( self ) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__( self ):
        return sum(1 for _ in self )

    def __repr__( self ):
        return "->".join(str(item ) for item in iter(self ) )

    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )

    def insert_head( self , data ):
        self.insert_nth(0 , data )

    def insert_nth( self , index , data ):
        if index < 0 or index > len(self ):
            raise IndexError('list index out of range.' )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node

    def delete_front( self ):
        return self.delete_nth(0 )

    def delete_tail( self ):
        return self.delete_nth(len(self ) - 1 )

    def delete_nth( self , index = 0 ):
        if not 0 <= index < len(self ):
            raise IndexError('list index out of range.' )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty( self ):
        return len(self ) == 0
def test_circular_linked_list():
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""
    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen
    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True
    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True
    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )
    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3
    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
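# A tiny demo of the structure validated above, kept separate from the
# asserts:
#
#   cll = CircularLinkedList()
#   for value in (1, 2, 3):
#       cll.insert_tail(value)
#   print(cll)          # 1->2->3
#   cll.delete_front()
#   print(cll)          # 2->3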
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_a = logging.get_logger(__name__)
_a = {
'hustvl/yolos-small': 'https://huggingface.co/hustvl/yolos-small/resolve/main/config.json',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class _lowerCAmelCase ( PretrainedConfig ):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = "yolos"
def __init__( self : Optional[int], UpperCAmelCase__ : List[Any]=7_6_8, UpperCAmelCase__ : Union[str, Any]=1_2, UpperCAmelCase__ : int=1_2, UpperCAmelCase__ : Any=3_0_7_2, UpperCAmelCase__ : int="gelu", UpperCAmelCase__ : Dict=0.0, UpperCAmelCase__ : List[str]=0.0, UpperCAmelCase__ : Any=0.02, UpperCAmelCase__ : Union[str, Any]=1E-12, UpperCAmelCase__ : Union[str, Any]=[5_1_2, 8_6_4], UpperCAmelCase__ : Optional[Any]=1_6, UpperCAmelCase__ : str=3, UpperCAmelCase__ : Tuple=True, UpperCAmelCase__ : str=1_0_0, UpperCAmelCase__ : Optional[Any]=True, UpperCAmelCase__ : Dict=False, UpperCAmelCase__ : str=1, UpperCAmelCase__ : str=5, UpperCAmelCase__ : Optional[int]=2, UpperCAmelCase__ : int=5, UpperCAmelCase__ : Optional[int]=2, UpperCAmelCase__ : Tuple=0.1, **UpperCAmelCase__ : Any, ):
super().__init__(**UpperCAmelCase__ )
__lowercase = hidden_size
__lowercase = num_hidden_layers
__lowercase = num_attention_heads
__lowercase = intermediate_size
__lowercase = hidden_act
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = image_size
__lowercase = patch_size
__lowercase = num_channels
__lowercase = qkv_bias
__lowercase = num_detection_tokens
__lowercase = use_mid_position_embeddings
__lowercase = auxiliary_loss
# Hungarian matcher
__lowercase = class_cost
__lowercase = bbox_cost
__lowercase = giou_cost
# Loss coefficients
__lowercase = bbox_loss_coefficient
__lowercase = giou_loss_coefficient
__lowercase = eos_coefficient
class _lowerCAmelCase ( OnnxConfig ):
"""simple docstring"""
__UpperCAmelCase : Any = version.parse("1.11" )
@property
def _lowercase ( self : Optional[int] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase ( self : List[Any] ):
return 1E-4
@property
def _lowercase ( self : Tuple ):
return 1_2
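# Usage sketch, assuming the classes above map to YolosConfig and its ONNX
# export counterpart:
#
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   assert config.model_type == "yolos"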
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : List[str], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : Dict ):
__lowercase = dataset
__lowercase = process
__lowercase = params
def __len__( self : str ):
return len(self.dataset )
def __getitem__( self : List[Any], UpperCAmelCase__ : int ):
__lowercase = self.dataset[i]
__lowercase = self.process(UpperCAmelCase__, **self.params )
return processed
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Optional[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : Any, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : List[Any]=None ):
__lowercase = loader
__lowercase = infer
__lowercase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
__lowercase = None
__lowercase = loader_batch_size
# Internal bookkeeping
__lowercase = None
__lowercase = None
def __len__( self : str ):
return len(self.loader )
def __iter__( self : List[str] ):
__lowercase = iter(self.loader )
return self
def _lowercase ( self : Union[str, Any] ):
if isinstance(self._loader_batch_data, torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
__lowercase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
__lowercase = {}
for k, element in self._loader_batch_data.items():
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
# Convert ModelOutput to tuple first
__lowercase = element.to_tuple()
if isinstance(element[0], torch.Tensor ):
__lowercase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0], np.ndarray ):
__lowercase = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0], torch.Tensor ):
__lowercase = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0], np.ndarray ):
__lowercase = tuple(np.expand_dims(el[self._loader_batch_index], 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
__lowercase = None
elif isinstance(element[self._loader_batch_index], torch.Tensor ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__lowercase = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index], np.ndarray ):
# Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
__lowercase = np.expand_dims(element[self._loader_batch_index], 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
__lowercase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
__lowercase = self._loader_batch_data.__class__(UpperCAmelCase__ )
self._loader_batch_index += 1
return result
def _lowercase ( self : Tuple ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
__lowercase = next(self.iterator )
__lowercase = self.infer(UpperCAmelCase__, **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(UpperCAmelCase__, torch.Tensor ):
__lowercase = processed
else:
__lowercase = list(processed.keys() )[0]
__lowercase = processed[key]
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = len(UpperCAmelCase__ )
else:
__lowercase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowercase = observed_batch_size
# Setting internal index to unwrap the batch
__lowercase = processed
__lowercase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __init__( self : Union[str, Any], UpperCAmelCase__ : Any, UpperCAmelCase__ : List[str], UpperCAmelCase__ : int, UpperCAmelCase__ : str=None ):
super().__init__(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
def __iter__( self : str ):
__lowercase = iter(self.loader )
__lowercase = None
return self
def _lowercase ( self : int ):
if self.subiterator is None:
__lowercase = self.infer(next(self.iterator ), **self.params )
try:
# Try to return next item
__lowercase = next(self.subiterator )
except StopIteration:
# When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
__lowercase = self.infer(next(self.iterator ), **self.params )
__lowercase = next(self.subiterator )
return processed
class _lowerCAmelCase ( lowercase ):
"""simple docstring"""
def __iter__( self : int ):
__lowercase = iter(self.loader )
return self
def _lowercase ( self : List[str] ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
# hits an `is_last` and then just passes it on to the caller.
__lowercase = False
__lowercase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
__lowercase = self.loader_batch_item()
__lowercase = item.pop("is_last" )
accumulator.append(UpperCAmelCase__ )
if is_last:
return accumulator
while not is_last:
__lowercase = self.infer(next(self.iterator ), **self.params )
if self.loader_batch_size is not None:
if isinstance(UpperCAmelCase__, torch.Tensor ):
__lowercase = processed
else:
__lowercase = list(processed.keys() )[0]
__lowercase = processed[key]
if isinstance(UpperCAmelCase__, UpperCAmelCase__ ):
__lowercase = len(UpperCAmelCase__ )
else:
__lowercase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
__lowercase = observed_batch_size
__lowercase = processed
__lowercase = 0
while self._loader_batch_index < self.loader_batch_size:
__lowercase = self.loader_batch_item()
__lowercase = item.pop("is_last" )
accumulator.append(UpperCAmelCase__ )
if is_last:
return accumulator
else:
__lowercase = processed
__lowercase = item.pop("is_last" )
accumulator.append(UpperCAmelCase__ )
return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset: Dataset, key: str):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]
class KeyPairDataset(Dataset):
    def __init__(self, dataset: Dataset, key1: str, key2: str):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel
def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)
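
# The two prefix-filtering loops above follow the same pattern; a small helper
# like this one (a sketch, not part of the original script) makes the intent
# explicit: keep only the keys under `prefix` and strip the prefix off.
def extract_sub_state_dict(state_dict, prefix):
    return {key[len(prefix):]: value for key, value in state_dict.items() if key.startswith(prefix)}
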
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
import unittest
from parameterized import parameterized
from transformers import OpenLlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import OpenLlamaForCausalLM, OpenLlamaForSequenceClassification, OpenLlamaModel
class OpenLlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return OpenLlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            use_stable_embedding=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = OpenLlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = OpenLlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = OpenLlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next tokens and extend input_ids accordingly
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class OpenLlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (OpenLlamaModel, OpenLlamaForCausalLM, OpenLlamaForSequenceClassification) if is_torch_available() else ()
    )
    all_generative_model_classes = (OpenLlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": OpenLlamaModel,
            "text-classification": OpenLlamaForSequenceClassification,
            "text-generation": OpenLlamaForCausalLM,
            "zero-shot": OpenLlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = OpenLlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OpenLlamaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_open_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_open_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = OpenLlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    @unittest.skip("Open-Llama buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass

    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = OpenLlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = OpenLlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
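
# For reference, a minimal sketch of how a `rope_scaling` factor like the one
# tested above stretches position indices. This mirrors the "linear" strategy
# only, and the helper name is illustrative, not a transformers API.
def _linear_rope_positions_demo(seq_length, factor):
    """Linear RoPE scaling divides every position index by `factor`."""
    return [position / factor for position in range(seq_length)]


# Example: _linear_rope_positions_demo(4, 10.0) -> [0.0, 0.1, 0.2, 0.3]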
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp
    import numpy as np

    from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pre-allocate the cache, then run one decode over all-but-the-last token
        # followed by a single-token decode that reuses the cache
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        # reference: one uncached decode over the full sequence
        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        # pad the attention mask out to the cache length
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ],
            axis=-1,
        )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
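
# A quick worked example (not part of the original test module) of the
# decoder-mask rule above: position 0 is always attended to, and later positions
# are masked wherever the shifted decoder input equals the pad token:
#
#     pad_token_id = 1
#     decoder_input_ids = np.array([[0, 5, 1, 7]])
#     -> decoder_attention_mask == np.array([[1, 1, 0, 1]])
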
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]

        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implemented only because Lightning requires a forward method
    def forward(self):
        pass
def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):
    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print(f"Conversion successful. Model saved under {pytorch_dump_folder_path}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch Lightning Checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
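
# After conversion, the saved folder can be loaded like any other checkpoint.
# A minimal usage sketch (the dump-folder path below is a placeholder):
#
#     from transformers import LongformerForQuestionAnswering, LongformerTokenizer
#
#     model = LongformerForQuestionAnswering.from_pretrained("path/to/pytorch_dump_folder")
#     tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-base-4096")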