import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = num_frames * self.num_patches_per_frame + 1
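        # With the defaults above: num_patches_per_frame = (10 // 2) ** 2 = 25,
        # so seq_length = 2 * 25 + 1 = 51 tokens per video.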
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as TimeSformer does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                # (with the tester defaults: seq_len // num_frames + 1 = 51 // 2 + 1 = 26)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
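
# The .npy file holds the clip as a stack of frames; the integration test below
# only feeds the first 8 frames (video[:8]) to the image processor.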
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }

        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]

        tokenizer = Tokenizer(Unigram())

        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)

        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )

        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }

        super().__init__(tokenizer, parameters)
    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)

        self.add_unk_id()
    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator"""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )

        self._tokenizer.train_from_iterator(iterator, trainer=trainer)

        self.add_unk_id()

    def add_unk_id(self):
        tokenizer_json = json.loads(self._tokenizer.to_str())

        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]

        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
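

# A minimal usage sketch (hypothetical corpus path, illustrative only):
#
#   tokenizer = SentencePieceUnigramTokenizer()
#   tokenizer.train(files="corpus.txt", vocab_size=8000)
#   print(tokenizer._tokenizer.encode("Hello world").tokens)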
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1_000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths,
            embed_dims=self.embed_dims,
            mlp_ratio=4,
            downsamples=[True, True, True, True],
            hidden_act="gelu",
            num_labels=self.num_labels,
            down_patch_size=3,
            down_stride=2,
            down_pad=1,
            drop_rate=0.0,
            drop_path_rate=0.0,
            use_layer_scale=True,
            layer_scale_init_value=1e-5,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            # (with the tester defaults this gives (13, 48, 56, 56) for stages 0-1 down to
            # (13, 220, 7, 7) for stages 6-7)
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _lowerCAmelCase ( self ):
"""simple docstring"""
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel

api = HfApi()
results = {}
# fmt: off
lowerCAmelCase : List[str] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
lowerCAmelCase : Dict = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
lowerCAmelCase : str = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
lowerCAmelCase : List[Any] = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
lowerCAmelCase : int = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
lowerCAmelCase : int = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
lowerCAmelCase : List[str] = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
lowerCAmelCase : Optional[int] = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
lowerCAmelCase : Any = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
lowerCAmelCase : Union[str, Any] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
lowerCAmelCase : str = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
lowerCAmelCase : List[str] = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
lowerCAmelCase : Optional[Any] = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
lowerCAmelCase : int = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]

        print(f"Started running {mod.modelId}!!!")

        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)

        torch.manual_seed(0)
        random.seed(0)

        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample

        # the expected-slice keys are the model ids with "/" and "-" replaced by "_"
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!")
'''simple docstring'''
import argparse
import hashlib
import os
import urllib.request
import warnings
import torch
from torch import nn
from tqdm import tqdm
from transformers import WhisperConfig, WhisperForConditionalGeneration
_MODELS = {
"tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
"tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
"base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
"base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
"small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
"small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
"medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
"medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
"large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt",
"large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
}
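
# Each URL above embeds the checkpoint's expected SHA256 as the second-to-last
# path segment; _download() below re-derives and verifies it.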
def remove_ignore_keys_(state_dict):
    ignore_keys = ["layers", "blocks"]
    for k in ignore_keys:
        state_dict.pop(k, None)
WHISPER_MAPPING = {
"blocks": "layers",
"mlp.0": "fc1",
"mlp.2": "fc2",
"mlp_ln": "final_layer_norm",
".attn.query": ".self_attn.q_proj",
".attn.key": ".self_attn.k_proj",
".attn.value": ".self_attn.v_proj",
".attn_ln": ".self_attn_layer_norm",
".attn.out": ".self_attn.out_proj",
".cross_attn.query": ".encoder_attn.q_proj",
".cross_attn.key": ".encoder_attn.k_proj",
".cross_attn.value": ".encoder_attn.v_proj",
".cross_attn_ln": ".encoder_attn_layer_norm",
".cross_attn.out": ".encoder_attn.out_proj",
"decoder.ln.": "decoder.layer_norm.",
"encoder.ln.": "encoder.layer_norm.",
"token_embedding": "embed_tokens",
"encoder.positional_embedding": "encoder.embed_positions.weight",
"decoder.positional_embedding": "decoder.embed_positions.weight",
"ln_post": "layer_norm",
}
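
# For example, rename_keys() below turns
#   "decoder.blocks.0.mlp.0.weight"      -> "decoder.layers.0.fc1.weight"
#   "encoder.blocks.2.attn.query.weight" -> "encoder.layers.2.self_attn.q_proj.weight"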
def rename_keys(s_dict):
    keys = list(s_dict.keys())
    for key in keys:
        new_key = key
        for k, v in WHISPER_MAPPING.items():
            if k in key:
                new_key = new_key.replace(k, v)

        print(f"{key} -> {new_key}")

        s_dict[new_key] = s_dict.pop(key)
    return s_dict
def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def _download(url: str, root: str = ".") -> bytes:
    # NOTE: the default for `root` is an assumption; the call below passes only the URL
    os.makedirs(root, exist_ok=True)
    filename = os.path.basename(url)

    # the checkpoint URLs embed the expected SHA256 as the second-to-last path segment
    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, filename)

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        model_bytes = open(download_target, "rb").read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return model_bytes
        else:
            warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break

                output.write(buffer)
                loop.update(len(buffer))

    model_bytes = open(download_target, "rb").read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model.")

    return model_bytes
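
# Illustrative call (not part of the original script; relies on the default root above):
#   model_bytes = _download(_MODELS["tiny"])  # fetches tiny.pt, verifying the URL-embedded SHA256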
def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path):
    if ".pt" not in checkpoint_path:
        original_checkpoint = _download(_MODELS[checkpoint_path])
    else:
        original_checkpoint = torch.load(checkpoint_path, map_location="cpu")
    dimensions = original_checkpoint["dims"]
    state_dict = original_checkpoint["model_state_dict"]
    proj_out_weights = state_dict["decoder.token_embedding.weight"]
    remove_ignore_keys_(state_dict)
    rename_keys(state_dict)
    tie_embeds = True
    ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0]

    config = WhisperConfig(
        vocab_size=dimensions["n_vocab"],
        encoder_ffn_dim=ffn_dim,
        decoder_ffn_dim=ffn_dim,
        num_mel_bins=dimensions["n_mels"],
        d_model=dimensions["n_audio_state"],
        max_target_positions=dimensions["n_text_ctx"],
        encoder_layers=dimensions["n_audio_layer"],
        encoder_attention_heads=dimensions["n_audio_head"],
        decoder_layers=dimensions["n_text_layer"],
        decoder_attention_heads=dimensions["n_text_head"],
        max_source_positions=dimensions["n_audio_ctx"],
    )

    model = WhisperForConditionalGeneration(config)
    missing, unexpected = model.model.load_state_dict(state_dict, strict=False)
    if len(missing) > 0 and not set(missing) <= {
        "encoder.embed_positions.weights",
        "decoder.embed_positions.weights",
    }:
        raise ValueError(
            "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
            f" but all the following weights are missing {missing}")

    if tie_embeds:
        model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens)
    else:
        model.proj_out.weight.data = proj_out_weights

    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowercase_ = argparse.ArgumentParser()
# # Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints")
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
lowercase_ = parser.parse_args()
convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def _get_single_answer(example):
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a["start_token"]) > 0:
                break
        return a

    answer = {"id": example["id"]}
    annotation = example["annotations"]
    yes_no_answer = annotation["yes_no_answer"]
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer["category"] = ["yes"] if 1 in yes_no_answer else ["no"]
        answer["start_token"] = answer["end_token"] = []
        answer["start_byte"] = answer["end_byte"] = []
        answer["text"] = ["<cls>"]
    else:
        answer["category"] = ["short"]
        out = choose_first(annotation["short_answers"])
        if len(out["start_token"]) == 0:
            # answer will be long if short is not available
            answer["category"] = ["long"]
            out = choose_first(annotation["long_answer"], is_long_answer=True)
            out["text"] = []
        answer.update(out)

    # disregard some samples
    if len(answer["start_token"]) > 1 or answer["start_token"] == answer["end_token"]:
        answer["remove_it"] = True
    else:
        answer["remove_it"] = False

    cols = ["start_token", "end_token", "start_byte", "end_byte", "text"]
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError("Issue in ID", example["id"])

    return answer
def get_context_and_ans(example, assertion=False):
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer["start_byte"]
    del answer["end_byte"]

    # handle yes_no answers explicitly
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        doc = example["document"]["tokens"]
        context = []
        for i in range(len(doc["token"])):
            if not doc["is_html"][i]:
                context.append(doc["token"][i])
        return {
            "context": " ".join(context),
            "answer": {
                "start_token": -100,  # ignore index in cross-entropy
                "end_token": -100,  # ignore index in cross-entropy
                "category": answer["category"],
                "span": answer["category"],  # extra
            },
        }

    # later, help in removing all no answers
    if answer["start_token"] == [-1]:
        return {
            "context": "None",
            "answer": {
                "start_token": -1,
                "end_token": -1,
                "category": "null",
                "span": "None",  # extra
            },
        }

    # handling normal samples
    cols = ["start_token", "end_token"]
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example["document"]["tokens"]
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    context = []
    for i in range(len(doc["token"])):
        if not doc["is_html"][i]:
            context.append(doc["token"][i])
        else:
            if answer["start_token"] > i:
                start_token -= 1
            if answer["end_token"] > i:
                end_token -= 1
    new = " ".join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc["is_html"][answer["start_token"] : answer["end_token"]]
        old = doc["token"][answer["start_token"] : answer["end_token"]]
        old = " ".join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print("ID:", example["id"])
            print("New:", new, end="\n")
            print("Old:", old, end="\n\n")

    return {
        "context": " ".join(context),
        "answer": {
            "start_token": start_token,
            "end_token": end_token - 1,  # this makes it inclusive
            "category": answer["category"],  # either long or short
            "span": new,  # extra
        },
    }
def get_strided_contexts_and_ans(example, tokenizer, doc_stride=2048, max_length=4096, assertion=True):
    out = get_context_and_ans(example, assertion=assertion)
    answer = out["answer"]

    # later, removing these samples
    if answer["start_token"] == -1:
        return {
            "example_id": example["id"],
            "input_ids": [[-1]],
            "labels": {
                "start_token": [-1],
                "end_token": [-1],
                "category": ["null"],
            },
        }

    input_ids = tokenizer(example["question"]["text"], out["context"]).input_ids
    q_len = input_ids.index(tokenizer.sep_token_id) + 1

    # return yes/no
    if answer["category"][0] in ["yes", "no"]:  # category is list with one element
        inputs = []
        category = []
        q_indices = input_ids[:q_len]
        # window starts advance by max_length - doc_stride tokens each step
        doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)
        for i in doc_start_indices:
            end_index = i + max_length - q_len
            slice = input_ids[i:end_index]
            inputs.append(q_indices + slice)
            category.append(answer["category"][0])
            if slice[-1] == tokenizer.sep_token_id:
                break

        return {
            "example_id": example["id"],
            "input_ids": inputs,
            "labels": {
                "start_token": [-100] * len(inputs),
                "end_token": [-100] * len(inputs),
                "category": category,
            },
        }

    splitted_context = out["context"].split()
    complete_end_token = splitted_context[answer["end_token"]]
    answer["start_token"] = len(
        tokenizer(
            " ".join(splitted_context[: answer["start_token"]]),
            add_special_tokens=False,
        ).input_ids
    )
    answer["end_token"] = len(
        tokenizer(" ".join(splitted_context[: answer["end_token"]]), add_special_tokens=False).input_ids
    )

    answer["start_token"] += q_len
    answer["end_token"] += q_len

    # fixing end token
    num_sub_tokens = len(tokenizer(complete_end_token, add_special_tokens=False).input_ids)
    if num_sub_tokens > 1:
        answer["end_token"] += num_sub_tokens - 1

    old = input_ids[answer["start_token"] : answer["end_token"] + 1]  # right & left are inclusive
    start_token = answer["start_token"]
    end_token = answer["end_token"]

    if assertion:
        new = tokenizer.decode(old)
        if answer["span"] != new:
            print("ISSUE IN TOKENIZATION")
            print("OLD:", answer["span"])
            print("NEW:", new, end="\n\n")

    if len(input_ids) <= max_length:
        return {
            "example_id": example["id"],
            "input_ids": [input_ids],
            "labels": {
                "start_token": [answer["start_token"]],
                "end_token": [answer["end_token"]],
                "category": answer["category"],
            },
        }

    q_indices = input_ids[:q_len]
    doc_start_indices = range(q_len, len(input_ids), max_length - doc_stride)

    inputs = []
    answers_start_token = []
    answers_end_token = []
    answers_category = []  # null, yes, no, long, short
    for i in doc_start_indices:
        end_index = i + max_length - q_len
        slice = input_ids[i:end_index]
        inputs.append(q_indices + slice)
        assert len(inputs[-1]) <= max_length, "Issue in truncating length"

        if start_token >= i and end_token <= end_index - 1:
            start_token = start_token - i + q_len
            end_token = end_token - i + q_len
            answers_category.append(answer["category"][0])  # ["short"] -> "short"
        else:
            start_token = -100
            end_token = -100
            answers_category.append("null")
        new = inputs[-1][start_token : end_token + 1]

        answers_start_token.append(start_token)
        answers_end_token.append(end_token)
        if assertion:
            if new != old and new != [tokenizer.cls_token_id]:
                print("ISSUE in strided for ID:", example["id"])
                print("New:", tokenizer.decode(new))
                print("Old:", tokenizer.decode(old), end="\n\n")
        if slice[-1] == tokenizer.sep_token_id:
            break

    return {
        "example_id": example["id"],
        "input_ids": inputs,
        "labels": {
            "start_token": answers_start_token,
            "end_token": answers_end_token,
            "category": answers_category,
        },
    }
def prepare_inputs(example, tokenizer, doc_stride=2048, max_length=4096, assertion=False):
    example = get_strided_contexts_and_ans(
        example,
        tokenizer,
        doc_stride=doc_stride,
        max_length=max_length,
        assertion=assertion,
    )
    return example


def save_to_disk(hf_data, file_name):
    with jsonlines.open(file_name, "a") as writer:
        for example in tqdm(hf_data, total=len(hf_data), desc="Saving samples ... "):
            labels = example["labels"]
            for ids, start, end, cat in zip(
                example["input_ids"],
                labels["start_token"],
                labels["end_token"],
                labels["category"],
            ):
                if start == -1 and end == -1:
                    continue  # leave waste samples with no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # discard ~60% of the no-answer ("null") samples
                writer.write(
                    {
                        "input_ids": ids,
                        "start_token": start,
                        "end_token": end,
                        "category": CATEGORY_MAPPING[cat],
                    }
                )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
snake_case : Any = load_dataset("""natural_questions""")
snake_case : List[Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
snake_case : Tuple = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
snake_case : List[Any] = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
snake_case : List[str] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
snake_case : int = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
snake_case : Union[str, Any] = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
'''simple docstring'''
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
A_ = "hf-internal-testing/tiny-random-bert"
A_ = os.path.join(TRANSFORMERS_CACHE, "models--hf-internal-testing--tiny-random-bert")
A_ = "9b8c223d42b2188cb49d29af482996f9d0f3e5a6"
class _snake_case ( unittest.TestCase ):
def __UpperCamelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE:int = cached_file(UpperCamelCase_ ,UpperCamelCase_ )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCamelCase_ ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCamelCase_ ,UpperCamelCase_ ) ) )
with open(os.path.join(UpperCamelCase_ ,"refs" ,"main" ) ) as f:
SCREAMING_SNAKE_CASE:Tuple = f.read()
self.assertEqual(UpperCamelCase_ ,os.path.join(UpperCamelCase_ ,"snapshots" ,UpperCamelCase_ ,UpperCamelCase_ ) )
self.assertTrue(os.path.isfile(UpperCamelCase_ ) )
# File is cached at the same place the second time.
SCREAMING_SNAKE_CASE:int = cached_file(UpperCamelCase_ ,UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ ,UpperCamelCase_ )
# Using a specific revision to test the full commit hash.
SCREAMING_SNAKE_CASE:Optional[Any] = cached_file(UpperCamelCase_ ,UpperCamelCase_ ,revision="9b8c223" )
self.assertEqual(UpperCamelCase_ ,os.path.join(UpperCamelCase_ ,"snapshots" ,UpperCamelCase_ ,UpperCamelCase_ ) )
    def test_cached_file_errors(self):
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            _ = cached_file("tiny-random-bert", CONFIG_NAME)

        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            _ = cached_file(RANDOM_BERT, CONFIG_NAME, revision="aaaa")

        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")
    def test_non_existence_is_cached(self):
        with self.assertRaisesRegex(EnvironmentError, "does not appear to have a file named"):
            _ = cached_file(RANDOM_BERT, "conf")

        with open(os.path.join(CACHE_DIR, "refs", "main")) as f:
            main_commit = f.read()
        self.assertTrue(os.path.isfile(os.path.join(CACHE_DIR, ".no_exist", main_commit, "conf")))

        path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        path = cached_file(RANDOM_BERT, "conf", local_files_only=True, _raise_exceptions_for_missing_entries=False)
        self.assertIsNone(path)

        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            path = cached_file(RANDOM_BERT, "conf", _raise_exceptions_for_connection_errors=False)
            self.assertIsNone(path)
            # This check ensures we did call the fake head request
            mock_head.assert_called()
    def test_has_file(self):
        self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only", WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", TF2_WEIGHTS_NAME))
        self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only", FLAX_WEIGHTS_NAME))
    def test_get_file_from_repo_distant(self):
        # `get_file_from_repo` returns None if the file does not exist
        self.assertIsNone(get_file_from_repo("bert-base-cased", "ahah.txt"))

        # The function raises if the repository does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid model identifier"):
            get_file_from_repo("bert-base-case", CONFIG_NAME)

        # The function raises if the revision does not exist.
        with self.assertRaisesRegex(EnvironmentError, "is not a valid git identifier"):
            get_file_from_repo("bert-base-cased", CONFIG_NAME, revision="ahaha")

        resolved_file = get_file_from_repo("bert-base-cased", CONFIG_NAME)
        # The name is the cached name which is not very easy to test, so instead we load the content.
        config = json.loads(open(resolved_file, "r").read())
        self.assertEqual(config["hidden_size"], 768)
    def test_get_file_from_repo_local(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            filename = Path(tmp_dir) / "a.txt"
            filename.touch()
            self.assertEqual(get_file_from_repo(tmp_dir, "a.txt"), str(filename))

            self.assertIsNone(get_file_from_repo(tmp_dir, "b.txt"))
'''simple docstring'''
# HF Trainer benchmarking tool
#
# This tool can be used to run and compare multiple dimensions of the HF Trainers args.
#
# It then prints a report once in github format with all the information that needs to be shared
# with others, and a second time in a console-friendly format, so it's easier to use when tuning things up.
#
# The main idea is:
#
# ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \
# --target-metric-key train_samples_per_second
#
# The variations can be any command line argument that you want to compare and not just dtype as in
# the example.
#
# --variations allows you to compare variations in multiple dimensions.
#
# as the first dimension has 2 options and the second 3 in our example, this will run the trainer 6
# times adding one of:
#
# 1. --tf32 0 --fp16 0
# 2. --tf32 0 --fp16 1
# 3. --tf32 0 --bf16 1
# 4. --tf32 1 --fp16 0
# 5. --tf32 1 --fp16 1
# 6. --tf32 1 --bf16 1
#
# and print the results. This is just a cartesian product - and more than 2 dimensions can be used.
#
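# A minimal sketch of that expansion (illustrative names only; the real logic lives
# further down in this script):
#
#   import itertools
#   dims = [["--tf32 0", "--tf32 1"], ["", "--fp16", "--bf16"]]
#   variations = [" ".join(v).strip() for v in itertools.product(*dims)]
#   # -> ["--tf32 0", "--tf32 0 --fp16", "--tf32 0 --bf16", "--tf32 1", ...]
#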
# If you want to rely on defaults, this:
# --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1'
# is identical to this:
# --variations '--tf32 0|--tf32 1' '|--fp16|--bf16'
#
# the leading empty variation in the 2nd dimension is a valid variation.
#
# So here we get the following 6 variations:
#
# 1. --tf32 0
# 2. --tf32 0 --fp16
# 3. --tf32 0 --bf16
# 4. --tf32 1
# 5. --tf32 1 --fp16
# 6. --tf32 1 --bf16
#
# In this particular case we don't know what the default tf32 setting is (it's normally
# pytorch-version dependent). That's why it's best to do an explicit setting of each variation:
# `--tf32 0|--tf32 1`
#
# Here is a full example of a training run:
#
# CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \
# --base-cmd \
# ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \
# --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \
# --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \
# --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \
# --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \
# --source_prefix "translate English to Romanian: " --warmup_steps 50 \
# --max_train_samples 20000 --dataloader_num_workers 2 ' \
# --target-metric-key train_samples_per_second --repeat-times 1 --variations \
# '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \
# --repeat-times 1 --base-variation '--tf32 0'
#
# and here is a possible output:
#
#
# | Variation | Train | Diff | Train |
# | | samples | % | loss |
# | | per | | |
# | | second | | |
# |:----------------|----------:|-------:|--------:|
# | --tf32 0 | 285.11 | 0 | 2.51 |
# | --tf32 1 | 342.09 | 20 | 2.51 |
# | --fp16 --tf32 0 | 423.49 | 49 | 2.51 |
# | --fp16 --tf32 1 | 423.13 | 48 | 2.51 |
# | --bf16 --tf32 0 | 416.80 | 46 | 2.52 |
# | --bf16 --tf32 1 | 415.87 | 46 | 2.52 |
#
#
# So you can quickly compare the different outcomes.
#
# Typically running each experiment once is enough, but if the environment is unstable you can
# re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results.
#
# By default it'll use the lowest result as the base line to use as 100% and then compare the rest to
# it as can be seen from the table above, but you can also specify which combination is the one to use as
# the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0'
#
# --target-metric-key is there to tell the program which metrics to compare - the different metric keys are
# inside output_dir/all_results.json. e.g., to measure eval performance instead of train use:
# --target-metric-key eval_samples_per_second
# but of course you will need to adjust the --base-cmd value in the example to perform evaluation as
# well (as currently it doesn't)
#
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
A_ = float("nan")
class Tee:
    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
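
# Usage sketch (illustrative, not from the original file): `sys.stdout = Tee("report.txt")`
# mirrors everything printed to the console into report.txt as well.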
def get_original_command(max_width=80, full_python_path=False):
    cmd = []

    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")

    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)

    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))

    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)

    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"

    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"

    return [sys.executable] + shlex.split(args.base_cmd)
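# For example (an illustrative sketch), get_base_command turns
#   args.base_cmd = "run_translation.py --output_dir foo \\\n --do_train"
# into roughly
#   [sys.executable, "run_translation.py", "--do_train", "--output_dir", "output_benchmark", "--overwrite_output_dir"]
# i.e., the caller's --output_dir is stripped and the benchmark's own is forced.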
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)

    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)

    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)

    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}

    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)

    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
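# The metrics come from the all_results.json file the HF Trainer writes; its shape is
# roughly (values illustrative):
#   {"train_samples_per_second": 342.09, "train_loss": 2.51, "train_runtime": 58.4, ...}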
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"""
Datetime    : {datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")}

Software:
transformers: {transformers.__version__}
torch       : {torch.__version__}
cuda        : {torch.version.cuda}
python      : {platform.python_version()}

Hardware:
{torch.cuda.device_count()} GPUs      : {properties.name}, {properties.total_memory/2**30:0.2f}GB
"""
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"

    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()

    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )

    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols

    # capitalize
    df = df.rename(str.capitalize, axis="columns")

    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")

    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]

    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations", default=None, type=str, nargs="+", required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation", default=None, type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key", default=None, type=str, required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys", default="", type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times", default=1, type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir", default="output_benchmark", type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose", default=False, action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")

    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)


if __name__ == "__main__":
    main()
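# A quick illustration of the variation expansion in main():
#   --variations '|--fp16|--bf16' '--tf32 0|--tf32 1'
# is split on '|' per dimension and expanded via itertools.product into the 6 runs
# seen in the table above:
#   '--tf32 0', '--tf32 1', '--fp16 --tf32 0', '--fp16 --tf32 1', '--bf16 --tf32 0', '--bf16 --tf32 1'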
# ---------------------------------------------------------------------------
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config_from_model = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config_from_model, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
        )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token
            )

        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
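# A minimal sketch of the `.update()` behaviour exercised above (assumes a local
# `transformers` install; not part of the test suite):
#
#   from transformers import GenerationConfig
#   cfg = GenerationConfig(do_sample=True, temperature=0.7)
#   unused = cfg.update(max_new_tokens=1024, foo="bar")
#   assert unused == {"foo": "bar"} and cfg.max_new_tokens == 1024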
# ---------------------------------------------------------------------------
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_processor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)

            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
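# A minimal sketch of the register/auto-load round-trip tested above (uses the test
# fixtures imported at the top of this file; `tmp_dir` and the no-arg constructor are
# assumptions):
#
#   AutoConfig.register("custom", CustomConfig)
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
#   CustomImageProcessor().save_pretrained(tmp_dir)
#   reloaded = AutoImageProcessor.from_pretrained(tmp_dir)  # -> CustomImageProcessor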
# ---------------------------------------------------------------------------
"""
Shear stress: given any two of stress (tau), tangential force (F) and cross-sectional
area (A), compute the missing quantity from tau = F / A. Pass the unknown value as 0.
"""
from __future__ import annotations


def shear_stress(
    stress: float,
    tangential_force: float,
    area: float,
) -> tuple[str, float]:
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
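# Worked examples (values illustrative):
#   shear_stress(stress=25, tangential_force=100, area=0)     -> ("area", 4.0)
#   shear_stress(stress=0, tangential_force=1600, area=200)   -> ("stress", 8.0)
#   shear_stress(stress=1000, tangential_force=0, area=1200)  -> ("tangential_force", 1200000.0)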
# ---------------------------------------------------------------------------
"""Convert DialoGPT checkpoints: rename the LM head key so transformers can load them."""
import argparse
import os

import torch

from transformers.utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
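# The conversion is only a key rename inside the pickled state dict; expanded out it is
# (a sketch, assuming WEIGHTS_NAME == "pytorch_model.bin" as in current transformers):
#
#   d = torch.load("small_ft.pkl")
#   d["lm_head.weight"] = d.pop("lm_head.decoder.weight")
#   torch.save(d, "./DialoGPT-small/pytorch_model.bin")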
# ---------------------------------------------------------------------------
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
    np.dtype("|b1"),
    np.dtype("|u1"),
    np.dtype("<u2"),
    np.dtype(">u2"),
    np.dtype("<i2"),
    np.dtype(">i2"),
    np.dtype("<u4"),
    np.dtype(">u4"),
    np.dtype("<i4"),
    np.dtype(">i4"),
    np.dtype("<f4"),
    np.dtype(">f4"),
    np.dtype("<f8"),
    np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file, stored in Arrow as a
    struct of {"bytes": binary, "path": string}."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        """Encode example into a format for Arrow."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode example image file into image data."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """If in the decodable state, return the feature itself, otherwise flatten the feature into a dictionary."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        """Cast an Arrow array to the Image arrow storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=storage.is_null()
            )
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files into the Arrow array."""

        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."
            )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"
        )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    """Encode a list of objects into a format suitable for creating an extension array of type `ImageExtensionType`."""
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
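# A minimal sketch of the encode/decode round-trip (assumes Pillow is installed):
#
#   import numpy as np
#   feat = Image()
#   encoded = feat.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))
#   # -> {"path": None, "bytes": b"\x89PNG..."}  (PNG bytes for the uint8 RGB array)
#   pil_image = feat.decode_example(encoded)  # back to a PIL.Image.Image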
# ---------------------------------------------------------------------------
"""
Decompress a file produced by a Lempel-Ziv style compressor: read the file as a bit
string, strip the unary length prefix, expand the codes with a growing lexicon and
write the decompressed bit stream back as bytes.
"""
import math
import sys


def read_file_binary(file_path: str) -> str:
    """Read the given file as bytes and return them as a string of '0'/'1' bits."""
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def decompress_data(data_bits: str) -> str:
    """Expand the code stream using a lexicon that doubles whenever its size hits a power of two."""
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + "0"

        if math.log2(index).is_integer():
            # the code width grows by one bit: prefix every existing key with "0"
            new_lex = {}
            for curr_key in list(lexicon):
                new_lex["0" + curr_key] = lexicon.pop(curr_key)
            lexicon = new_lex

        lexicon[bin(index)[2:]] = last_match_id + "1"
        index += 1
        curr_string = ""
    return result


def write_file_binary(file_path: str, to_write: str) -> None:
    """Write the given bit string to the file as bytes, padding the last byte."""
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def remove_prefix(data_bits: str) -> str:
    """Strip the unary length prefix that the compressor prepends to the bit stream."""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1

    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits


def decompress(source_path: str, destination_path: str) -> None:
    """Read the source file, decompress it and write the result to the destination file."""
    data_bits = read_file_binary(source_path)
    data_bits = remove_prefix(data_bits)
    decompressed = decompress_data(data_bits)
    write_file_binary(destination_path, decompressed)


if __name__ == "__main__":
    decompress(sys.argv[1], sys.argv[2])
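# Usage sketch (hypothetical file names):
#   python lempel_ziv_decompress.py compressed.bin restored.bin
# reads `compressed.bin` as a bit string, strips the unary length prefix, expands the
# Lempel-Ziv codes and writes the decompressed bit stream back to `restored.bin`.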
# ---------------------------------------------------------------------------
"""
Project Euler Problem 46: https://projecteuler.net/problem=46

Goldbach's other conjecture: every odd composite number can be written as the sum of
a prime and twice a square. Find the smallest odd composite for which this fails.
"""
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """Checks to see if a number is a prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


odd_composites = [num for num in range(3, 100_001, 2) if not is_prime(num)]


def compute_nums(n: int) -> list[int]:
    """Returns a list of the first n odd composite numbers which do not follow the conjecture."""
    if not isinstance(n, int):
        raise ValueError("n must be an integer")
    if n <= 0:
        raise ValueError("n must be >= 0")

    list_nums = []
    for num in range(len(odd_composites)):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num])
        if len(list_nums) == n:
            return list_nums
    return []


def solution() -> int:
    """Return the solution to the problem."""
    return compute_nums(1)[0]


if __name__ == "__main__":
    print(f"{solution() = }")
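# Sanity check: the smallest odd composite that cannot be written as a prime plus
# twice a square is 5777, so `solution()` returns 5777.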
# ---------------------------------------------------------------------------
import inspect
import unittest

from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
    from transformers.models.mobilevitva.modeling_mobilevitva import (
        MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
        make_divisible,
    )


if is_vision_available():
    from PIL import Image

    from transformers import MobileViTImageProcessor


class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))


class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout_prob = ffn_dropout
        self.attn_dropout_prob = attn_dropout

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout_prob,
            attn_dropout=self.attn_dropout_prob,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )

    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileViTV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileViTV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run.")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We verify our results on the standard COCO cats test image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1_000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
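# A minimal inference sketch mirroring the slow tests above (checkpoint names taken
# from the tests; `MobileViTVa*` are this file's aliases for the MobileViTV2 classes):
#
#   processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#   model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
#   inputs = processor(images=prepare_img(), return_tensors="pt")
#   with torch.no_grad():
#       logits = model(**inputs).logits      # shape (1, 1000)
#   pred = logits.argmax(-1).item()          # ImageNet-1k class id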
# ---------------------------------------------------------------------------
"""Processor class for ViLT."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    r"""
    Constructs a ViLT processor which wraps a BERT tokenizer and a ViLT image processor into a single processor.
    """
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all its arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
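# A minimal usage sketch (checkpoint name assumed; ViLT's VQA checkpoint on the Hub):
#
#   from transformers import ViltProcessor
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   enc = processor(images=image, text="How many cats are there?", return_tensors="pt")
#   # `enc` combines input_ids/attention_mask from the tokenizer with
#   # pixel_values/pixel_mask from the image processor.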
# ---------------------------------------------------------------------------
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import LlamaConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaTokenizer
class LlamaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return LlamaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = LlamaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = LlamaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = LlamaForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and attention_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class LlamaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (LlamaModel, LlamaForCausalLM, LlamaForSequenceClassification) if is_torch_available() else ()
    all_generative_model_classes = (LlamaForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": LlamaModel,
            "text-classification": LlamaForSequenceClassification,
            "text-generation": LlamaForCausalLM,
            "zero-shot": LlamaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = LlamaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LlamaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_llama_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_llama_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = LlamaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    @unittest.skip("LLaMA buffers include complex numbers, which breaks this test")
    def test_save_load_fast_init_from_base(self):
        pass
    @parameterized.expand([("linear",), ("dynamic",)])
    def test_model_rope_scaling(self, scaling_type):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10], config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        original_model = LlamaModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"type": scaling_type, "factor": 10.0}
        scaled_model = LlamaModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output, scaled_short_output, atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output, scaled_long_output, atol=1e-5))
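# --- Illustration (not part of the original test file) ---
# The same rope_scaling dict exercised above is how users enable context extension on a
# real config; the factor value below is an example, not taken from the tests.
def _example_rope_scaled_model():
    config = LlamaConfig(max_position_embeddings=2048, rope_scaling={"type": "linear", "factor": 2.0})
    # linear scaling maps positions beyond 2048 back into the trained position range
    return LlamaModel(config)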
@require_torch
class LlamaIntegrationTest(unittest.TestCase):
    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_7b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-7b-hf", device_map="auto")
        out = model(torch.tensor([input_ids]))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-6.6550, -4.1227, -4.9859, -3.2406, 0.8262, -3.0033, 1.2964, -3.3699]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-12.8281, -7.4453, -0.4639, -8.0625, -7.2500, -8.0000, -6.4883, -7.7695, -7.8438, -7.0312, -6.2188, -7.1328, -1.8496, 1.9961, -8.6250, -6.7227, -12.8281, -6.9492, -7.0742, -7.7852, -7.5820, -7.9062, -6.9375, -7.9805, -8.3438, -8.1562, -8.0469, -7.6250, -7.7422, -7.3398,])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-2.0622, -1.2794, -1.1638, -0.9788, -1.4603, -1.0238, -1.7893, -1.4411]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-8.1406, -8.0547, 2.7461, -1.2344, -0.1448, -1.8262, -1.0020, -1.8154, -1.6895, -1.8516, -2.3574, -0.9277, 3.7598, 6.5742, -1.2998, -0.1177, -8.1406, -2.9688, -2.9199, -3.1699, -3.5254, -2.3555, -2.7988, -3.4141, -2.8262, -4.5195, -3.3379, -3.3164, -2.7832, -3.0273])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Logits are not exactly the same, once we fix the instabilities somehow, will update!")
    @slow
    def test_model_13bf_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-13b-chat-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        # Expected mean on dim = -1
        expected_mean = torch.tensor([[-0.8562, -1.8520, -0.7551, -0.4162, -1.5161, -1.2038, -2.4823, -2.3254]])
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # slicing logits[0, 0, 0:30]
        # fmt: off
        expected_slice = torch.tensor([-2.2227, 4.8828, 0.9023, -0.4578, -0.7871, -0.1033, -0.6221, -0.5786, -0.7803, -1.0674, -1.2920, -0.1570, 0.8008, 2.0723, -0.9497, 0.2771, -2.2227, -0.7612, -1.4346, -1.2061, -1.6426, -0.3000, -0.7139, -1.1934, -1.8691, -1.6973, -1.5947, -1.2705, -0.3523, -0.5513])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip(
        "Logits are not exactly the same, once we fix the instabilities somehow, will update! Also it is gonna be a `too_slow` test"
    )
    @slow
    def test_model_70b_logits(self):
        input_ids = [1, 306, 4658, 278, 6593, 310, 2834, 338]
        model = LlamaForCausalLM.from_pretrained("meta-llama/Llama-2-70b-hf", device_map="auto")
        out = model(torch.tensor(input_ids))
        expected_mean = torch.tensor(
            [[-4.2327, -3.3360, -4.6665, -4.7631, -1.8180, -3.4170, -1.4211, -3.1810]], dtype=torch.float32
        )
        torch.testing.assert_close(out.mean(-1), expected_mean, atol=1e-2, rtol=1e-2)
        # fmt: off
        expected_slice = torch.tensor([-9.4922, -3.9551, 1.7998, -5.6758, -5.1055, -5.8984, -4.8320, -6.8086, -6.5391, -5.6172, -5.5820, -5.5352, 1.7881, 3.6289, -6.5117, -3.4785, -9.5000, -6.0352, -6.8125, -6.0195, -6.6836, -5.4727, -6.2812, -6.0391, -7.3398, -7.4297, -7.4844, -6.5820, -5.8789, -5.5312])
        # fmt: on
        torch.testing.assert_close(out[0, 0, :30], expected_slice, atol=1e-5, rtol=1e-5)

    @unittest.skip("Model is currently gated")
    @slow
    def test_model_13b_greedy_generation(self):
        EXPECTED_TEXT_COMPLETION = """Simply put, the theory of relativity states that 1) the laws of physics are the same everywhere in the universe and 2) the passage of time and the length of objects can vary depending on the observer\'s frame of reference.\n\nThe first part of the theory, that the laws of physics are the same everywhere, is known as the \"princi"""
        prompt = "Simply put, the theory of relativity states that "
        tokenizer = LlamaTokenizer.from_pretrained("meta-llama/Llama-2-13b-chat-hf")
        input_ids = tokenizer.encode(prompt, return_tensors="pt")
        model = LlamaForCausalLM.from_pretrained(
            "meta-llama/Llama-2-13b-chat-hf", device_map="sequential", use_safetensors=False
        )
        # greedy generation outputs
        generated_ids = model.generate(input_ids, max_new_tokens=64, top_p=None, temperature=1, do_sample=False)
        text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        self.assertEqual(EXPECTED_TEXT_COMPLETION, text)
import torch
from diffusers import DiffusionPipeline
class UnetSchedulerOneForwardPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        # start from pure noise shaped like one model sample
        noise = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1
        model_output = self.unet(noise, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, noise).prev_sample
        # dummy result: a tensor of ones with the same shape as the scheduler output
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)
        return result
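# --- Usage sketch (not part of the original file) ---
# Wiring the one-forward pipeline above to a randomly initialised UNet and scheduler;
# the tiny UNet hyperparameters below are illustrative only.
def _example_one_forward():
    from diffusers import DDPMScheduler, UNet2DModel

    unet = UNet2DModel(sample_size=32, in_channels=3, out_channels=3)
    scheduler = DDPMScheduler()
    pipe = UnetSchedulerOneForwardPipeline(unet=unet, scheduler=scheduler)
    return pipe()  # a (1, 3, 32, 32) tensor of ones by construction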
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    # straight-line distance between two feature vectors
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    # pair every training sample with its label
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]


if __name__ == "__main__":
    print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
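# --- Illustration (not in the original script) ---
# A quick sanity check of the classifier on the held-out split; accuracy on iris with
# the default k=5 is typically well above 0.9, though the exact value depends on the split.
def _example_accuracy():
    correct = sum(
        classifier(X_train, y_train, classes, point) == classes[target]
        for point, target in zip(X_test, y_test)
    )
    return correct / len(y_test)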
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)
class _PatchedModuleObj:
    """Set all the module's components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    """Patch a submodule attribute of an object, keeping all other submodules intact at all levels."""

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
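# --- Usage sketch (not part of the original module) ---
# Patching "os.path.join" as seen from some module `mod`, both as a context manager and
# with start()/stop(); `mod` and the replacement function are illustrative.
def _example_patch(mod):
    def fake_join(*parts):
        return "/".join(parts)

    with patch_submodule(mod, "os.path.join", fake_join):
        ...  # inside the block, mod's view of os.path.join is fake_join

    patch = patch_submodule(mod, "os.path.join", fake_join)
    patch.start()  # stays active until patch.stop() is called
    patch.stop()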
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
"4S 3H 2C 7S 5H",
"9D 8H 2C 6S 7H",
"2D 6D 9D TH 7D",
"TC 8C 2S JH 6C",
"JH 8S TH AH QH",
"TS KS 5S 9S AC",
"KD 6S 9D TH AD",
"KS 8D 4D 9S 4S", # pair
"8C 4S KH JS 4D", # pair
"QH 8H KD JH 8S", # pair
"KC 4H KS 2H 8D", # pair
"KD 4S KC 3H 8S", # pair
"AH 8S AS KC JH", # pair
"3H 4C 4H 3S 2H", # 2 pairs
"5S 5D 2C KH KH", # 2 pairs
"3C KH 5D 5S KH", # 2 pairs
"AS 3C KH AD KH", # 2 pairs
"7C 7S 3S 7H 5S", # 3 of a kind
"7C 7S KH 2H 7H", # 3 of a kind
"AC KH QH AH AS", # 3 of a kind
"2H 4D 3C AS 5S", # straight (low ace)
"3C 5C 4C 2C 6H", # straight
"6S 8S 7S 5H 9H", # straight
"JS QS 9H TS KH", # straight
"QC KH TS JS AH", # straight (high ace)
"8C 9C 5C 3C TC", # flush
"3S 8S 9S 5S KS", # flush
"4C 5C 9C 8C KC", # flush
"JH 8H AH KH QH", # flush
"3D 2H 3H 2C 2D", # full house
"2H 2C 3S 3H 3D", # full house
"KH KC 3S 3H 3D", # full house
"JC 6H JS JD JH", # 4 of a kind
"JC 7H JS JD JH", # 4 of a kind
"JC KH JS JD JH", # 4 of a kind
"2S AS 4S 5S 3S", # straight flush (low ace)
"2D 6D 3D 4D 5D", # straight flush
"5C 6C 3C 7C 4C", # straight flush
"JH 9H TH KH QH", # straight flush
"JH AH TH KH QH", # royal flush (high ace straight flush)
)
TEST_COMPARE = (
("2H 3H 4H 5H 6H", "KS AS TS QS JS", "Loss"),
("2H 3H 4H 5H 6H", "AS AD AC AH JD", "Win"),
("AS AH 2H AD AC", "JS JD JC JH 3D", "Win"),
("2S AH 2H AS AC", "JS JD JC JH AD", "Loss"),
("2S AH 2H AS AC", "2H 3H 5H 6H 7H", "Win"),
("AS 3S 4S 8S 2S", "2H 3H 5H 6H 7H", "Win"),
("2H 3H 5H 6H 7H", "2S 3H 4H 5S 6C", "Win"),
("2S 3H 4H 5S 6C", "3D 4C 5H 6H 2S", "Tie"),
("2S 3H 4H 5S 6C", "AH AC 5H 6H AS", "Win"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H AS", "Loss"),
("2S 2H 4H 5S 4C", "AH AC 5H 6H 7S", "Win"),
("6S AD 7H 4S AS", "AH AC 5H 6H 7S", "Loss"),
("2S AH 4H 5S KC", "AH AC 5H 6H 7S", "Loss"),
("2S 3H 6H 7S 9C", "7H 3C TH 6H 9S", "Loss"),
("4S 5H 6H TS AC", "3S 5H 6H TS AC", "Win"),
("2S AH 4H 5S 6C", "AD 4C 5H 6H 2C", "Tie"),
("AS AH 3H AD AC", "AS AH 2H AD AC", "Win"),
("AH AC 5H 5C QS", "AH AC 5H 5C KS", "Loss"),
("AH AC 5H 5C QS", "KH KC 5H 5C QS", "Win"),
("7C 7S KH 2H 7H", "3C 3S AH 2H 3H", "Win"),
("3C 3S AH 2H 3H", "7C 7S KH 2H 7H", "Loss"),
("6H 5H 4H 3H 2H", "5H 4H 3H 2H AH", "Win"),
("5H 4H 3H 2H AH", "5H 4H 3H 2H AH", "Tie"),
("5H 4H 3H 2H AH", "6H 5H 4H 3H 2H", "Loss"),
("AH AD KS KC AC", "AH KD KH AC KC", "Win"),
("2H 4D 3C AS 5S", "2H 4D 3C 6S 5S", "Loss"),
("2H 3S 3C 3H 2S", "3S 3C 2S 2H 2D", "Win"),
("4D 6D 5D 2D JH", "3S 8S 3H TC KH", "Loss"),
("4S 6C 8S 3S 7S", "AD KS 2D 7D 7C", "Loss"),
("6S 4C 7H 8C 3H", "5H JC AH 9D 9C", "Loss"),
("9D 9H JH TC QH", "3C 2S JS 5C 7H", "Win"),
("2H TC 8S AD 9S", "4H TS 7H 2C 5C", "Win"),
("9D 3S 2C 7S 7C", "JC TD 3C TC 9H", "Loss"),
)
TEST_FLUSH = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", True),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", False),
("AS 3S 4S 8S 2S", True),
)
TEST_STRAIGHT = (
("2H 3H 4H 5H 6H", True),
("AS AH 2H AD AC", False),
("2H 3H 5H 6H 7H", False),
("KS AS TS QS JS", True),
("8H 9H QS JS TH", True),
)
TEST_FIVE_HIGH_STRAIGHT = (
("2H 4D 3C AS 5S", True, [5, 4, 3, 2, 14]),
("2H 5D 3C AS 5S", False, [14, 5, 5, 3, 2]),
("JH QD KC AS TS", False, [14, 13, 12, 11, 10]),
("9D 3S 2C 7S 7C", False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
("JH AH TH KH QH", 0),
("JH 9H TH KH QH", 0),
("JC KH JS JD JH", 7),
("KH KC 3S 3H 3D", 6),
("8C 9C 5C 3C TC", 0),
("JS QS 9H TS KH", 0),
("7C 7S KH 2H 7H", 3),
("3C KH 5D 5S KH", 2),
("QH 8H KD JH 8S", 1),
("2D 6D 9D TH 7D", 0),
)
TEST_TYPES = (
("JH AH TH KH QH", 23),
("JH 9H TH KH QH", 22),
("JC KH JS JD JH", 21),
("KH KC 3S 3H 3D", 20),
("8C 9C 5C 3C TC", 19),
("JS QS 9H TS KH", 18),
("7C 7S KH 2H 7H", 17),
("3C KH 5D 5S KH", 16),
("QH 8H KD JH 8S", 15),
("2D 6D 9D TH 7D", 14),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # repeated calls must not mutate the hand's card values
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Project Euler problem 54: count the hands Player 1 wins in poker_hands.txt
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401
def _create_iam_role_for_sagemaker(role_name):
    iam_client = boto3.client("iam")
    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2)
        )
        policy_document = {
'Version': '2012-10-17',
'Statement': [
{
'Effect': 'Allow',
'Action': [
'sagemaker:*',
'ecr:GetDownloadUrlForLayer',
'ecr:BatchGetImage',
'ecr:BatchCheckLayerAvailability',
'ecr:GetAuthorizationToken',
'cloudwatch:PutMetricData',
'cloudwatch:GetMetricData',
'cloudwatch:GetMetricStatistics',
'cloudwatch:ListMetrics',
'logs:CreateLogGroup',
'logs:CreateLogStream',
'logs:DescribeLogStreams',
'logs:PutLogEvents',
'logs:GetLogEvents',
's3:CreateBucket',
's3:ListBucket',
's3:GetBucketLocation',
's3:GetObject',
's3:PutObject',
],
'Resource': '*',
}
],
}
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")
def _get_iam_role_arn(role_name):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id
        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key
    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region
    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )
        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")
    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )
    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )
    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )
    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
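# --- Illustration (not part of the original module) ---
# The interactive flow above ultimately just fills a SageMakerConfig; an equivalent
# non-interactive construction (field values below are examples) looks like:
def _example_config():
    return SageMakerConfig(
        image_uri=None,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=SageMakerDistributedType.NO,
        use_cpu=False,
        dynamo_config={},
        ec2_instance_type="ml.p3.2xlarge",
        profile="default",
        region="us-east-1",
        iam_role_name="accelerate_sagemaker_execution_role",
        mixed_precision="no",
        num_machines=1,
        sagemaker_inputs_file=None,
        sagemaker_metrics_file=None,
    )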
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
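# --- Usage sketch (not part of the original module) ---
# Running the pipeline end to end; the checkpoint id is an assumption (the public LDM
# 4x super-resolution weights), not something stated in this file.
def _example_super_resolution(low_res_image):
    pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
    return pipe(image=low_res_image, num_inference_steps=100, eta=1.0).images[0]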
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")
        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)
        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)
        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
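# --- Illustration (not part of the original test file) ---
# The numerical relationship the first test relies on: the exact erf-based gelu and the
# tanh approximation ("gelu_new") agree closely but not exactly.
def _example_gelu_gap():
    x = torch.linspace(-5.0, 5.0, steps=101)
    exact = get_activation("gelu")(x)
    approx = get_activation("gelu_new")(x)
    return (exact - approx).abs().max()  # small but nonzero, hence assertFalse above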
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
    'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_roformer_fast'] = ['RoFormerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_roformer'] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_roformer'] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_roformer'] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
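# --- Illustration (not part of the original module) ---
# What the _LazyModule pattern above buys a consumer: importing the package is cheap,
# and the heavy framework-specific module is only imported on first attribute access.
#
#   import transformers.models.roformer as roformer   # nothing heavy imported yet
#   roformer.RoFormerModel                            # first access triggers modeling_roformer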
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
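# --- Illustration (not part of the original script) ---
# Round-trip property of the mixed-alphabet cipher: deciphering an enciphered message
# with the same key recovers the (upper-cased) original, since the map is a bijection.
def _example_roundtrip():
    cipher_map = create_cipher_map("college")
    ciphertext = encipher("Hello World", cipher_map)
    assert decipher(ciphertext, cipher_map) == "HELLO WORLD"
    return ciphertext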
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_audio_spectrogram_transformer_config(model_name):
    config = ASTConfig()
    if "10-10" in model_name:
        pass
    elif "speech-commands" in model_name:
        config.max_length = 128
    elif "12-12" in model_name:
        config.time_stride = 12
        config.frequency_stride = 12
    elif "14-14" in model_name:
        config.time_stride = 14
        config.frequency_stride = 14
    elif "16-16" in model_name:
        config.time_stride = 16
        config.frequency_stride = 16
    else:
        raise ValueError("Model not supported")
    repo_id = "huggingface/label-files"
    if "speech-commands" in model_name:
        config.num_labels = 35
        filename = "speech-commands-v2-id2label.json"
    else:
        config.num_labels = 527
        filename = "audioset-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name):
    if "module.v" in name:
        name = name.replace("module.v", "audio_spectrogram_transformer")
    if "cls_token" in name:
        name = name.replace("cls_token", "embeddings.cls_token")
    if "dist_token" in name:
        name = name.replace("dist_token", "embeddings.distillation_token")
    if "pos_embed" in name:
        name = name.replace("pos_embed", "embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    # transformer blocks
    if "blocks" in name:
        name = name.replace("blocks", "encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    # final layernorm
    if "audio_spectrogram_transformer.norm" in name:
        name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
    # classifier head
    if "module.mlp_head.0" in name:
        name = name.replace("module.mlp_head.0", "classifier.layernorm")
    if "module.mlp_head.1" in name:
        name = name.replace("module.mlp_head.1", "classifier.dense")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            # Split the fused qkv matrix into separate query/key/value projections.
            # The target module names were lost in this dump and are reconstructed here
            # following the HF AST layout.
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.hidden_size
            prefix = f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def remove_keys(state_dict):
    ignore_keys = [
        "module.v.head.weight",
        "module.v.head.bias",
        "module.v.head_dist.weight",
        "module.v.head_dist.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
@torch.no_grad()
def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
    config = get_audio_spectrogram_transformer_config(model_name)
    model_name_to_url = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
    # load original state_dict
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # remove some keys
    remove_keys(state_dict)
    # rename some keys
    new_state_dict = convert_state_dict(state_dict, config)
    # load 🤗 model
    model = ASTForAudioClassification(config)
    model.eval()
    model.load_state_dict(new_state_dict)
    # verify outputs on dummy input
    # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
    mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
    std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
    max_length = 1024 if "speech-commands" not in model_name else 128
    feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
    if "speech-commands" in model_name:
        dataset = load_dataset("speech_commands", "v0.02", split="validation")
        waveform = dataset[0]["audio"]["array"]
    else:
        filepath = hf_hub_download(
            repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset",
        )
        waveform, _ = torchaudio.load(filepath)
        waveform = waveform.squeeze().numpy()
    inputs = feature_extractor(waveform, sampling_rate=16_000, return_tensors="pt")
    # forward pass
    outputs = model(**inputs)
    logits = outputs.logits
    if model_name == "ast-finetuned-audioset-10-10-0.4593":
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
    elif model_name == "ast-finetuned-audioset-10-10-0.450":
        expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
    elif model_name == "ast-finetuned-audioset-10-10-0.448":
        expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
    elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
        expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
    elif model_name == "ast-finetuned-audioset-12-12-0.447":
        expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
    elif model_name == "ast-finetuned-audioset-14-14-0.443":
        expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
    elif model_name == "ast-finetuned-audioset-16-16-0.442":
        expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
    elif model_name == "ast-finetuned-speech-commands-v2":
        expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
    else:
        raise ValueError("Unknown model name")
    if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
        raise ValueError("Logits don't match")
    print("Looks ok!")
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving feature extractor to {pytorch_dump_folder_path}")
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        print("Pushing model and feature extractor to the hub...")
        model.push_to_hub(f"MIT/{model_name}")
        feature_extractor.push_to_hub(f"MIT/{model_name}")
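# --- Usage sketch (not part of the original script) ---
# After conversion, the dumped folder (or the pushed hub repo) loads like any other
# audio classification checkpoint; the repo id below is an example target.
def _example_load_converted(folder="MIT/ast-finetuned-audioset-10-10-0.4593"):
    from transformers import AutoFeatureExtractor, AutoModelForAudioClassification

    extractor = AutoFeatureExtractor.from_pretrained(folder)
    model = AutoModelForAudioClassification.from_pretrained(folder)
    return extractor, model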
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""ast-finetuned-audioset-10-10-0.4593""",
type=str,
help="""Name of the Audio Spectrogram Transformer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
    convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class __magic_name__ ( FlaxModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase : Any = (
(
FlaxAlbertModel,
FlaxAlbertForPreTraining,
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
)
if is_flax_available()
else ()
)
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = FlaxAlbertModelTester(self )
@slow
def lowerCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
_UpperCamelCase: int = model_class_name.from_pretrained('''albert-base-v2''' )
_UpperCamelCase: List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowercase )
@require_flax
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def lowerCAmelCase ( self : int ):
"""simple docstring"""
_UpperCamelCase: Optional[int] = FlaxAlbertModel.from_pretrained('''albert-base-v2''' )
_UpperCamelCase: List[Any] = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
_UpperCamelCase: Optional[int] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
_UpperCamelCase: Optional[int] = model(_lowercase , attention_mask=_lowercase )[0]
_UpperCamelCase: Tuple = (1, 11, 768)
self.assertEqual(output.shape , _lowercase )
_UpperCamelCase: Tuple = np.array(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowercase , atol=1E-4 ) )
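# A minimal, standalone version of the slice check above (a sketch: it assumes
# network access to download the "albert-base-v2" weights; not part of the suite).
def _example_albert_forward():
    model = FlaxAlbertModel.from_pretrained('''albert-base-v2''')
    input_ids = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]])
    attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
    # index 0 of the output is the last hidden state: (batch, sequence, hidden_size)
    output = model(input_ids, attention_mask=attention_mask)[0]
    assert output.shape == (1, 11, 768)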
| 271
| 0
|
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A ( SchedulerCommonTest ):
_snake_case =(DDIMParallelScheduler,)
_snake_case =(('''eta''', 0.0), ('''num_inference_steps''', 50))
def lowerCAmelCase__ ( self: str , **_lowerCAmelCase: str ) -> str:
'''simple docstring'''
UpperCAmelCase_ ={
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
"clip_sample": True,
}
config.update(**_lowerCAmelCase )
return config
def lowerCAmelCase__ ( self: int , **_lowerCAmelCase: Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.scheduler_classes[0]
UpperCAmelCase_ =self.get_scheduler_config(**_lowerCAmelCase )
UpperCAmelCase_ =scheduler_class(**_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =10, 0.0
UpperCAmelCase_ =self.dummy_model()
UpperCAmelCase_ =self.dummy_sample_deter
scheduler.set_timesteps(_lowerCAmelCase )
for t in scheduler.timesteps:
UpperCAmelCase_ =model(_lowerCAmelCase , _lowerCAmelCase )
UpperCAmelCase_ =scheduler.step(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).prev_sample
return sample
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[int]:
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> List[str]:
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowerCAmelCase )
UpperCAmelCase_ =self.scheduler_classes[0]
UpperCAmelCase_ =self.get_scheduler_config(steps_offset=1 )
UpperCAmelCase_ =scheduler_class(**_lowerCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowerCAmelCase__ ( self: int ) -> Dict:
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowerCAmelCase , beta_end=_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Any:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[int] ) -> Tuple:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[Any]:
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Any:
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
self.check_over_configs(thresholding=_lowerCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_lowerCAmelCase , prediction_type=_lowerCAmelCase , sample_max_value=_lowerCAmelCase , )
def lowerCAmelCase__ ( self: Union[str, Any] ) -> List[str]:
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any ) -> List[str]:
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_lowerCAmelCase , num_inference_steps=_lowerCAmelCase )
def lowerCAmelCase__ ( self: int ) -> Tuple:
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowerCAmelCase , eta=_lowerCAmelCase )
def lowerCAmelCase__ ( self: Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.scheduler_classes[0]
UpperCAmelCase_ =self.get_scheduler_config()
UpperCAmelCase_ =scheduler_class(**_lowerCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.1_47_71 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.3_24_60 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1e-5
def lowerCAmelCase__ ( self: int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.scheduler_classes[0]
UpperCAmelCase_ =self.get_scheduler_config()
UpperCAmelCase_ =scheduler_class(**_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =10, 0.0
scheduler.set_timesteps(_lowerCAmelCase )
UpperCAmelCase_ =self.dummy_model()
UpperCAmelCase_ =self.dummy_sample_deter
UpperCAmelCase_ =self.dummy_sample_deter + 0.1
UpperCAmelCase_ =self.dummy_sample_deter - 0.1
UpperCAmelCase_ =samplea.shape[0]
UpperCAmelCase_ =torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCAmelCase_ =torch.arange(_lowerCAmelCase )[0:3, None].repeat(1 , _lowerCAmelCase )
UpperCAmelCase_ =model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCAmelCase_ =scheduler.batch_step_no_noise(_lowerCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowerCAmelCase )
UpperCAmelCase_ =torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCAmelCase_ =torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 11_47.79_04 ) < 1e-2
assert abs(result_mean.item() - 0.49_82 ) < 1e-3
def lowerCAmelCase__ ( self: Optional[int] ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ =self.full_loop()
UpperCAmelCase_ =torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCAmelCase_ =torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_72.00_67 ) < 1e-2
assert abs(result_mean.item() - 0.22_39_67 ) < 1e-3
def lowerCAmelCase__ ( self: int ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ =self.full_loop(prediction_type="v_prediction" )
UpperCAmelCase_ =torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCAmelCase_ =torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 52.53_02 ) < 1e-2
assert abs(result_mean.item() - 0.06_84 ) < 1e-3
def lowerCAmelCase__ ( self: Optional[Any] ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ =self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
UpperCAmelCase_ =torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCAmelCase_ =torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.82_95 ) < 1e-2
assert abs(result_mean.item() - 0.19_51 ) < 1e-3
def lowerCAmelCase__ ( self: Optional[Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ =self.full_loop(set_alpha_to_one=_lowerCAmelCase , beta_start=0.01 )
UpperCAmelCase_ =torch.sum(torch.abs(_lowerCAmelCase ) )
UpperCAmelCase_ =torch.mean(torch.abs(_lowerCAmelCase ) )
assert abs(result_sum.item() - 1_49.07_84 ) < 1e-2
assert abs(result_mean.item() - 0.19_41 ) < 1e-3
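# A condensed sketch of the sampling loop exercised by the full-loop tests
# above, using only the public scheduler API; the zero "residual" here is a
# stand-in for a trained UNet's noise prediction.
def _example_ddim_parallel_loop():
    scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        residual = torch.zeros_like(sample)  # stand-in model output
        sample = scheduler.step(residual, t, sample, eta=0.0).prev_sample
    return sample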
| 550
|
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
T = TypeVar("""T""")
def get_parent_position( position ):
    '''simple docstring'''
    return (position - 1) // 2
def get_child_left_position( position ):
    '''simple docstring'''
    return (2 * position) + 1
def get_child_right_position( position ):
    '''simple docstring'''
    return (2 * position) + 2
class MinPriorityQueue ( Generic[T] ):
def __init__( self: List[str] ) -> None:
'''simple docstring'''
UpperCAmelCase_ =[]
UpperCAmelCase_ ={}
UpperCAmelCase_ =0
def __len__( self: Union[str, Any] ) -> int:
'''simple docstring'''
return self.elements
def __repr__( self: Dict ) -> str:
'''simple docstring'''
return str(self.heap )
def lowerCAmelCase__ ( self: Any ) -> bool:
'''simple docstring'''
return self.elements == 0
def lowerCAmelCase__ ( self: Optional[int] , _lowerCAmelCase: T , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
self.heap.append((elem, weight) )
UpperCAmelCase_ =self.elements
self.elements += 1
self._bubble_up(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Tuple ) -> T:
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCAmelCase_ , UpperCAmelCase_ =self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[0]
self._bubble_down(_lowerCAmelCase )
return elem
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: T , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
UpperCAmelCase_ =self.position_map[elem]
UpperCAmelCase_ =(elem, weight)
if position > 0:
UpperCAmelCase_ =get_parent_position(_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(_lowerCAmelCase )
else:
self._bubble_down(_lowerCAmelCase )
else:
self._bubble_down(_lowerCAmelCase )
def lowerCAmelCase__ ( self: Any , _lowerCAmelCase: T ) -> None:
'''simple docstring'''
UpperCAmelCase_ =self.position_map[elem]
if curr_pos == 0:
return None
UpperCAmelCase_ =get_parent_position(_lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[curr_pos]
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_up(_lowerCAmelCase )
return None
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: T ) -> None:
'''simple docstring'''
UpperCAmelCase_ =self.position_map[elem]
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[curr_pos]
UpperCAmelCase_ =get_child_left_position(_lowerCAmelCase )
UpperCAmelCase_ =get_child_right_position(_lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[child_left_position]
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
if child_left_position < self.elements:
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
UpperCAmelCase_ , UpperCAmelCase_ =self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(_lowerCAmelCase , _lowerCAmelCase )
return self._bubble_down(_lowerCAmelCase )
return None
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: int , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
UpperCAmelCase_ =self.heap[nodea_pos][0]
UpperCAmelCase_ =self.heap[nodea_pos][0]
UpperCAmelCase_ , UpperCAmelCase_ =(
self.heap[nodea_pos],
self.heap[nodea_pos],
)
UpperCAmelCase_ =nodea_pos
UpperCAmelCase_ =nodea_pos
class GraphUndirectedWeighted ( Generic[T] ):
def __init__( self: Tuple ) -> None:
'''simple docstring'''
UpperCAmelCase_ ={}
UpperCAmelCase_ =0
def __repr__( self: List[str] ) -> str:
'''simple docstring'''
return str(self.connections )
def __len__( self: Optional[Any] ) -> int:
'''simple docstring'''
return self.nodes
def lowerCAmelCase__ ( self: Union[str, Any] , _lowerCAmelCase: T ) -> None:
'''simple docstring'''
if node not in self.connections:
UpperCAmelCase_ ={}
self.nodes += 1
def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: T , _lowerCAmelCase: T , _lowerCAmelCase: int ) -> None:
'''simple docstring'''
self.add_node(_lowerCAmelCase )
self.add_node(_lowerCAmelCase )
UpperCAmelCase_ =weight
UpperCAmelCase_ =weight
def prims_algo( graph ):
'''simple docstring'''
UpperCAmelCase_ ={node: maxsize for node in graph.connections}
UpperCAmelCase_ ={node: None for node in graph.connections}
UpperCAmelCase_ =MinPriorityQueue()
for node, weight in dist.items():
        priority_queue.push(node , weight )
if priority_queue.is_empty():
return dist, parent
# initialization
UpperCAmelCase_ =priority_queue.extract_min()
UpperCAmelCase_ =0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCAmelCase_ =dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour , dist[neighbour] )
UpperCAmelCase_ =node
# running prim's algorithm
while not priority_queue.is_empty():
UpperCAmelCase_ =priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCAmelCase_ =dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour , dist[neighbour] )
UpperCAmelCase_ =node
return dist, parent
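# A self-contained worked example of Prim's algorithm on a triangle graph,
# written with plain dicts and a linear scan instead of the indexed heap above
# (illustrative; it mirrors the idea, not the exact class API).
def _example_prims_triangle():
    connections = {"a": {"b": 3, "c": 5}, "b": {"a": 3, "c": 10}, "c": {"a": 5, "b": 10}}
    dist = {node: maxsize for node in connections}
    parent = {node: None for node in connections}
    dist["a"] = 0
    visited = set()
    while len(visited) < len(connections):
        node = min((n for n in connections if n not in visited), key=dist.get)
        visited.add(node)
        for neighbour, weight in connections[node].items():
            if neighbour not in visited and weight < dist[neighbour]:
                dist[neighbour] = weight
                parent[neighbour] = node
    return parent  # {"a": None, "b": "a", "c": "a"} -> MST edges a-b and a-c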
| 550
| 1
|
'''simple docstring'''
from .testing import (
are_the_same_tensors,
execute_subprocess_async,
require_bnb,
require_cpu,
require_cuda,
require_huggingface_suite,
require_mps,
require_multi_gpu,
require_multi_xpu,
require_safetensors,
require_single_gpu,
require_single_xpu,
require_torch_min_version,
require_tpu,
require_xpu,
skip,
slow,
)
from .training import RegressionDataset, RegressionModel, RegressionModelaXPU
from .scripts import test_script, test_sync, test_ops # isort: skip
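# Typical usage of these re-exports in a test module (illustrative):
#
#   from accelerate.test_utils import require_cuda, slow
#
#   @require_cuda
#   def test_runs_on_gpu():
#       ...  # skipped automatically on machines without a CUDA device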
| 407
|
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
__A =[
'good first issue',
'feature request',
'wip',
]
def main():
UpperCAmelCase__ : List[str] = Github(os.environ["""GITHUB_TOKEN"""] )
UpperCAmelCase__ : Dict = g.get_repo("""huggingface/accelerate""" )
UpperCAmelCase__ : Any = repo.get_issues(state="""open""" )
for issue in open_issues:
        UpperCAmelCase__ : Tuple = sorted([comment for comment in issue.get_comments()] , key=lambda i : i.created_at , reverse=True )
UpperCAmelCase__ : Any = comments[0] if len(UpperCamelCase__ ) > 0 else None
UpperCAmelCase__ : Optional[Any] = dt.utcnow()
UpperCAmelCase__ : Optional[int] = (current_time - issue.updated_at).days
UpperCAmelCase__ : int = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state="""closed""" )
elif (
days_since_updated > 2_3
and days_since_creation >= 3_0
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""" )
if __name__ == "__main__":
main()
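# Note: this script is designed to run unattended (e.g. from a scheduled CI
# job); it only requires a GITHUB_TOKEN environment variable whose token can
# read, comment on, and close issues in the target repository.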
| 407
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import RegNetForImageClassification, RegNetModel
from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class RegNetModelTester :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=3 , __UpperCAmelCase=32 , __UpperCAmelCase=3 , __UpperCAmelCase=10 , __UpperCAmelCase=[10, 20, 30, 40] , __UpperCAmelCase=[1, 1, 2, 1] , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase="relu" , __UpperCAmelCase=3 , __UpperCAmelCase=None , ) ->Any:
a_ = parent
a_ = batch_size
a_ = image_size
a_ = num_channels
a_ = embeddings_size
a_ = hidden_sizes
a_ = depths
a_ = is_training
a_ = use_labels
a_ = hidden_act
a_ = num_labels
a_ = scope
a_ = len(__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a_ = None
if self.use_labels:
a_ = ids_tensor([self.batch_size] , self.num_labels)
a_ = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self) ->int:
return RegNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[Any]:
a_ = RegNetModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase) ->List[str]:
a_ = self.num_labels
a_ = RegNetForImageClassification(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , labels=__UpperCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = self.prepare_config_and_inputs()
a_ , a_ , a_ = config_and_inputs
a_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class snake_case ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
a_ : int = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
a_ : Any = (
{"""feature-extraction""": RegNetModel, """image-classification""": RegNetForImageClassification}
if is_torch_available()
else {}
)
a_ : Tuple = False
a_ : Optional[int] = False
a_ : Union[str, Any] = False
a_ : Tuple = False
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = RegNetModelTester(self)
a_ = ConfigTester(self , config_class=__UpperCAmelCase , has_text_modality=__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self) ->List[str]:
return
@unittest.skip(reason="RegNet does not use inputs_embeds")
def UpperCAmelCase__ ( self) ->Union[str, Any]:
pass
@unittest.skip(reason="RegNet does not support input and output embeddings")
def UpperCAmelCase__ ( self) ->Any:
pass
def UpperCAmelCase__ ( self) ->Dict:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(__UpperCAmelCase)
a_ = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ = [*signature.parameters.keys()]
a_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->Tuple:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase)
def UpperCAmelCase__ ( self) ->str:
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ = model_class(config=__UpperCAmelCase)
for name, module in model.named_modules():
                if isinstance(__UpperCAmelCase , (nn.BatchNorm2d, nn.GroupNorm)):
self.assertTrue(
torch.all(module.weight == 1) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
self.assertTrue(
torch.all(module.bias == 0) , msg=F'''Parameter {name} of model {model_class} seems not properly initialized''' , )
def UpperCAmelCase__ ( self) ->str:
def check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase):
a_ = model_class(__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
with torch.no_grad():
a_ = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase))
a_ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ = self.model_tester.num_stages
self.assertEqual(len(__UpperCAmelCase) , expected_num_stages + 1)
# RegNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , )
a_ , a_ = self.model_tester.prepare_config_and_inputs_for_common()
a_ = ["basic", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
a_ = layer_type
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ = True
check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase)
def UpperCAmelCase__ ( self) ->int:
a_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCAmelCase)
@slow
def UpperCAmelCase__ ( self) ->List[Any]:
for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ = RegNetModel.from_pretrained(__UpperCAmelCase)
self.assertIsNotNone(__UpperCAmelCase)
def UpperCamelCase ( ) ->Any:
"""simple docstring"""
a_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self) ->Tuple:
return (
AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self) ->int:
a_ = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(__UpperCAmelCase)
a_ = self.default_image_processor
a_ = prepare_img()
a_ = image_processor(images=__UpperCAmelCase , return_tensors="pt").to(__UpperCAmelCase)
# forward pass
with torch.no_grad():
a_ = model(**__UpperCAmelCase)
# verify the logits
a_ = torch.Size((1, 10_00))
self.assertEqual(outputs.logits.shape , __UpperCAmelCase)
a_ = torch.tensor([-0.4_180, -1.5_051, -3.4_836]).to(__UpperCAmelCase)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCAmelCase , atol=1E-4))
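# A condensed, standalone version of the integration check above (a sketch:
# it assumes network access for the checkpoint and a local copy of the test image).
def _example_regnet_inference():
    model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    processor = AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # shape (1, 1000): ImageNet classes
    return logits.argmax(-1).item()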
| 210
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class snake_case ( PipelineTesterMixin , unittest.TestCase ):
a_ : Tuple = LDMTextToImagePipeline
a_ : Tuple = TEXT_TO_IMAGE_PARAMS - {
"""negative_prompt""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
"""prompt_embeds""",
}
a_ : Optional[int] = PipelineTesterMixin.required_optional_params - {
"""num_images_per_prompt""",
"""callback""",
"""callback_steps""",
}
a_ : Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS
a_ : int = False
def UpperCAmelCase__ ( self) ->Tuple:
torch.manual_seed(0)
        a_ = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
a_ = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0)
a_ = AutoencoderKL(
block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
torch.manual_seed(0)
a_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
a_ = CLIPTextModel(__UpperCAmelCase)
a_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
a_ = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=0) ->Optional[Any]:
if str(__UpperCAmelCase).startswith("mps"):
a_ = torch.manual_seed(__UpperCAmelCase)
else:
a_ = torch.Generator(device=__UpperCAmelCase).manual_seed(__UpperCAmelCase)
a_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ ( self) ->List[Any]:
a_ = "cpu" # ensure determinism for the device-dependent torch.Generator
a_ = self.get_dummy_components()
a_ = LDMTextToImagePipeline(**__UpperCAmelCase)
pipe.to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = self.get_dummy_inputs(__UpperCAmelCase)
a_ = pipe(**__UpperCAmelCase).images
a_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 16, 16, 3)
a_ = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014])
assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-3
@slow
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=torch.float32 , __UpperCAmelCase=0) ->Union[str, Any]:
a_ = torch.manual_seed(__UpperCAmelCase)
a_ = np.random.RandomState(__UpperCAmelCase).standard_normal((1, 4, 32, 32))
a_ = torch.from_numpy(__UpperCAmelCase).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase)
a_ = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ ( self) ->str:
a_ = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = self.get_inputs(__UpperCAmelCase)
a_ = pipe(**__UpperCAmelCase).images
a_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_56, 2_56, 3)
a_ = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878])
a_ = np.abs(expected_slice - image_slice).max()
assert max_diff < 1E-3
@nightly
@require_torch_gpu
class snake_case ( unittest.TestCase ):
def UpperCAmelCase__ ( self) ->Union[str, Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase=torch.float32 , __UpperCAmelCase=0) ->List[Any]:
a_ = torch.manual_seed(__UpperCAmelCase)
a_ = np.random.RandomState(__UpperCAmelCase).standard_normal((1, 4, 32, 32))
a_ = torch.from_numpy(__UpperCAmelCase).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase)
a_ = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 50,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def UpperCAmelCase__ ( self) ->Optional[Any]:
a_ = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(__UpperCAmelCase)
pipe.set_progress_bar_config(disable=__UpperCAmelCase)
a_ = self.get_inputs(__UpperCAmelCase)
a_ = pipe(**__UpperCAmelCase).images[0]
a_ = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy")
a_ = np.abs(expected_image - image).max()
assert max_diff < 1E-3
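# Minimal end-to-end usage of the pipeline under test (a sketch: it downloads
# the public CompVis checkpoint and runs a short, low-step sampling pass).
def _example_ldm_text2img():
    pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
    generator = torch.manual_seed(0)
    output = pipe(
        "A painting of a squirrel eating a burger",
        generator=generator,
        num_inference_steps=5,
        guidance_scale=6.0,
    )
    return output.images[0]  # a 256x256 PIL image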
| 210
| 1
|
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester :
def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=[1, 1, 2] , UpperCamelCase_=1 , UpperCamelCase_=32 , UpperCamelCase_=4 , UpperCamelCase_=8 , UpperCamelCase_=37 , UpperCamelCase_="gelu_new" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=5_12 , UpperCamelCase_=3 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Tuple:
__lowercase : int = parent
__lowercase : Optional[int] = batch_size
__lowercase : Dict = seq_length
__lowercase : int = is_training
__lowercase : int = use_input_mask
__lowercase : str = use_token_type_ids
__lowercase : List[Any] = use_labels
__lowercase : Union[str, Any] = vocab_size
__lowercase : Any = block_sizes
__lowercase : Optional[Any] = num_decoder_layers
__lowercase : Union[str, Any] = d_model
__lowercase : str = n_head
__lowercase : List[str] = d_head
__lowercase : str = d_inner
__lowercase : Tuple = hidden_act
__lowercase : str = hidden_dropout
__lowercase : Optional[Any] = attention_dropout
__lowercase : Dict = activation_dropout
__lowercase : Dict = max_position_embeddings
__lowercase : int = type_vocab_size
__lowercase : List[Any] = 2
__lowercase : Optional[int] = num_labels
__lowercase : int = num_choices
__lowercase : List[Any] = scope
__lowercase : Optional[Any] = initializer_std
# Used in the tests to check the size of the first attention layer
__lowercase : List[str] = n_head
# Used in the tests to check the size of the first hidden state
__lowercase : Optional[int] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
__lowercase : Any = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
__lowercase : Dict = self.num_hidden_layers + 2
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowercase : Optional[int] = None
if self.use_input_mask:
__lowercase : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
__lowercase : Optional[int] = None
if self.use_token_type_ids:
__lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowercase : List[Any] = None
__lowercase : str = None
__lowercase : Any = None
if self.use_labels:
__lowercase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowercase : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowercase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowercase : Optional[int] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Dict:
__lowercase : Any = TFFunnelModel(config=UpperCamelCase_ )
__lowercase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase : str = model(UpperCamelCase_ )
__lowercase : int = [input_ids, input_mask]
__lowercase : Optional[int] = model(UpperCamelCase_ )
__lowercase : Any = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowercase : int = False
__lowercase : List[str] = TFFunnelModel(config=UpperCamelCase_ )
__lowercase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
__lowercase : Tuple = False
__lowercase : Any = TFFunnelModel(config=UpperCamelCase_ )
__lowercase : str = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]:
__lowercase : Optional[int] = TFFunnelBaseModel(config=UpperCamelCase_ )
__lowercase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase : Any = model(UpperCamelCase_ )
__lowercase : List[Any] = [input_ids, input_mask]
__lowercase : Any = model(UpperCamelCase_ )
__lowercase : Any = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
__lowercase : Any = False
__lowercase : Optional[Any] = TFFunnelBaseModel(config=UpperCamelCase_ )
__lowercase : Optional[int] = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
__lowercase : Any = False
__lowercase : str = TFFunnelBaseModel(config=UpperCamelCase_ )
__lowercase : Tuple = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[Any]:
__lowercase : List[str] = TFFunnelForPreTraining(config=UpperCamelCase_ )
__lowercase : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Any:
__lowercase : str = TFFunnelForMaskedLM(config=UpperCamelCase_ )
__lowercase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase : Any = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Tuple:
__lowercase : int = self.num_labels
__lowercase : List[str] = TFFunnelForSequenceClassification(config=UpperCamelCase_ )
__lowercase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase : Tuple = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Dict:
__lowercase : Optional[int] = self.num_choices
__lowercase : List[Any] = TFFunnelForMultipleChoice(config=UpperCamelCase_ )
__lowercase : Union[str, Any] = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
__lowercase : Tuple = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
__lowercase : str = tf.tile(tf.expand_dims(UpperCamelCase_ , 1 ) , (1, self.num_choices, 1) )
__lowercase : Optional[Any] = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__lowercase : List[str] = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]:
__lowercase : str = self.num_labels
__lowercase : Optional[Any] = TFFunnelForTokenClassification(config=UpperCamelCase_ )
__lowercase : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase : str = model(UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Optional[Any]:
__lowercase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase_ )
__lowercase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__lowercase : List[Any] = model(UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self ) -> Optional[Any]:
        __lowercase : Tuple = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = __lowercase
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
UpperCamelCase =(
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCamelCase =(
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase =False
UpperCamelCase =False
def _lowerCamelCase ( self ) -> int:
__lowercase : Optional[int] = TFFunnelModelTester(self )
__lowercase : Any = ConfigTester(self , config_class=UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Any:
__lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*UpperCamelCase_ )
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin , unittest.TestCase ):
UpperCamelCase =(
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase =False
UpperCamelCase =False
def _lowerCamelCase ( self ) -> str:
__lowercase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase_ )
__lowercase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[str]:
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase_ )
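# Note: the "base" Funnel variant tested by this second class stops after the
# pooling encoder blocks, so its outputs keep the pooled sequence length (hence
# the (batch, 2, d_model) / (batch, 3, d_model) shape checks above), whereas
# the full model's decoder upsamples back to the original input length.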
| 76
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''',
'''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''',
'''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''',
'''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''',
'''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''',
'''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''',
'''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''',
'''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''',
'''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''',
'''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''',
'''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''',
'''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''',
}
class __a ( PretrainedConfig ):
'''simple docstring'''
_lowerCamelCase : List[Any] = """codegen"""
_lowerCamelCase : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self , _lowerCamelCase=50_400 , _lowerCamelCase=2_048 , _lowerCamelCase=2_048 , _lowerCamelCase=4_096 , _lowerCamelCase=28 , _lowerCamelCase=16 , _lowerCamelCase=64 , _lowerCamelCase=None , _lowerCamelCase="gelu_new" , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=0.0 , _lowerCamelCase=1e-5 , _lowerCamelCase=0.02 , _lowerCamelCase=True , _lowerCamelCase=50_256 , _lowerCamelCase=50_256 , _lowerCamelCase=False , **_lowerCamelCase , ) -> Union[str, Any]:
'''simple docstring'''
__lowercase = vocab_size
__lowercase = n_ctx
__lowercase = n_positions
__lowercase = n_embd
__lowercase = n_layer
__lowercase = n_head
__lowercase = n_inner
__lowercase = rotary_dim
__lowercase = activation_function
__lowercase = resid_pdrop
__lowercase = embd_pdrop
__lowercase = attn_pdrop
__lowercase = layer_norm_epsilon
__lowercase = initializer_range
__lowercase = use_cache
__lowercase = bos_token_id
__lowercase = eos_token_id
super().__init__(
bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class __a ( OnnxConfigWithPast ):
'''simple docstring'''
def __init__( self , _lowerCamelCase , _lowerCamelCase = "default" , _lowerCamelCase = None , _lowerCamelCase = False , ) -> str:
'''simple docstring'''
super().__init__(_lowerCamelCase , task=_lowerCamelCase , patching_specs=_lowerCamelCase , use_past=_lowerCamelCase )
if not getattr(self._config , "pad_token_id" , _lowerCamelCase ):
# TODO: how to do that better?
__lowercase = 0
@property
def SCREAMING_SNAKE_CASE ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
__lowercase = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
if self.use_past:
self.fill_with_past_key_values_(_lowerCamelCase , direction="inputs" )
__lowercase = {0: "batch", 1: "past_sequence + sequence"}
else:
__lowercase = {0: "batch", 1: "sequence"}
return common_inputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self._config.n_layer
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return self._config.n_head
def SCREAMING_SNAKE_CASE ( self , _lowerCamelCase , _lowerCamelCase = -1 , _lowerCamelCase = -1 , _lowerCamelCase = False , _lowerCamelCase = None , ) -> Mapping[str, Any]:
'''simple docstring'''
__lowercase = super(_lowerCamelCase , self ).generate_dummy_inputs(
_lowerCamelCase , batch_size=_lowerCamelCase , seq_length=_lowerCamelCase , is_pair=_lowerCamelCase , framework=_lowerCamelCase )
# We need to order the input in the way they appears in the forward()
__lowercase = OrderedDict({"input_ids": common_inputs["input_ids"]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__lowercase , __lowercase = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__lowercase = seqlen + 2
__lowercase = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__lowercase = [
(torch.zeros(_lowerCamelCase ), torch.zeros(_lowerCamelCase )) for _ in range(self.num_layers )
]
__lowercase = common_inputs["attention_mask"]
if self.use_past:
__lowercase = ordered_inputs["attention_mask"].dtype
__lowercase = torch.cat(
[ordered_inputs["attention_mask"], torch.ones(_lowerCamelCase , _lowerCamelCase , dtype=_lowerCamelCase )] , dim=1 )
return ordered_inputs
@property
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
return 13
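# Example instantiation (illustrative; CodeGenConfig / CodeGenOnnxConfig are
# the upstream names for the two classes defined above, and `tokenizer` is
# assumed to be any PreTrainedTokenizer instance):
#
#   config = CodeGenConfig(n_layer=4, n_head=4, n_embd=256)
#   onnx_config = CodeGenOnnxConfig(config, task="default", use_past=True)
#   dummy = onnx_config.generate_dummy_inputs(
#       tokenizer, batch_size=2, seq_length=8, framework=TensorType.PYTORCH
#   )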
| 118
| 0
|
"""simple docstring"""
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
UpperCamelCase = XCLIPTextConfig()
# derive patch size from model name
UpperCamelCase = model_name.find("patch" )
UpperCamelCase = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
UpperCamelCase = XCLIPVisionConfig(patch_size=_SCREAMING_SNAKE_CASE , num_frames=_SCREAMING_SNAKE_CASE )
if "large" in model_name:
UpperCamelCase = 768
UpperCamelCase = 3_072
UpperCamelCase = 12
UpperCamelCase = 1_024
UpperCamelCase = 4_096
UpperCamelCase = 16
UpperCamelCase = 24
UpperCamelCase = 768
UpperCamelCase = 3_072
if model_name == "xclip-large-patch14-16-frames":
UpperCamelCase = 336
UpperCamelCase = XCLIPConfig.from_text_vision_configs(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if "large" in model_name:
UpperCamelCase = 768
return config
def rename_key( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if name == "token_embedding.weight":
UpperCamelCase = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
UpperCamelCase = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
UpperCamelCase = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
UpperCamelCase = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
UpperCamelCase = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
UpperCamelCase = name.replace("c_proj" , "fc2" )
if name.startswith("transformer.resblocks" ):
UpperCamelCase = name.replace("transformer.resblocks" , "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
UpperCamelCase = name.replace("attn.out_proj" , "self_attn.out_proj" )
if "ln_final" in name:
UpperCamelCase = name.replace("ln_final" , "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
UpperCamelCase = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
UpperCamelCase = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
UpperCamelCase = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
if "visual.conv1" in name:
UpperCamelCase = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
UpperCamelCase = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
UpperCamelCase = name.replace("visual.ln_post" , "vision_model.post_layernorm" )
if "visual.proj" in name:
UpperCamelCase = name.replace("visual.proj" , "visual_projection.weight" )
if "text_projection" in name:
UpperCamelCase = name.replace("text_projection" , "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
UpperCamelCase = name.replace("prompts_visual_proj" , "prompts_visual_projection" )
if "prompts_visual_ln" in name:
UpperCamelCase = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
UpperCamelCase = name.replace("positional" , "position" )
if name.startswith("mit.resblocks" ):
UpperCamelCase = name.replace("mit.resblocks" , "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
UpperCamelCase = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
return name
def convert_state_dict( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
UpperCamelCase = orig_state_dict.pop(_SCREAMING_SNAKE_CASE )
if "attn.in_proj" in key:
UpperCamelCase = key.split("." )
if key.startswith("visual" ):
UpperCamelCase = key_split[3]
UpperCamelCase = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[
:dim
]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[
-dim:
]
else:
if "weight" in key:
UpperCamelCase = val[
:dim, :
]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[
-dim:, :
]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
elif key.startswith("mit" ):
UpperCamelCase = key_split[2]
UpperCamelCase = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[dim : dim * 2, :]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[dim : dim * 2]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = key_split[2]
UpperCamelCase = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase = val[:dim, :]
UpperCamelCase = val[
dim : dim * 2, :
]
UpperCamelCase = val[-dim:, :]
else:
UpperCamelCase = val[:dim]
UpperCamelCase = val[
dim : dim * 2
]
UpperCamelCase = val[-dim:]
else:
UpperCamelCase = rename_key(_SCREAMING_SNAKE_CASE )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCamelCase = val.T
UpperCamelCase = val
return orig_state_dict
def prepare_video( _SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if num_frames == 8:
UpperCamelCase = "eating_spaghetti_8_frames.npy"
elif num_frames == 16:
UpperCamelCase = "eating_spaghetti.npy"
elif num_frames == 32:
UpperCamelCase = "eating_spaghetti_32_frames.npy"
UpperCamelCase = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=_SCREAMING_SNAKE_CASE , repo_type="dataset" , )
UpperCamelCase = np.load(_SCREAMING_SNAKE_CASE )
return list(_SCREAMING_SNAKE_CASE )
def convert_xclip_checkpoint( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=False ):
"""simple docstring"""
UpperCamelCase = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )
    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , atol=1e-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub..." )
model.push_to_hub(_SCREAMING_SNAKE_CASE , organization="nielsr" )
processor.push_to_hub(_SCREAMING_SNAKE_CASE , organization="nielsr" )
slow_tokenizer.push_to_hub(_SCREAMING_SNAKE_CASE , organization="nielsr" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
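
# Example invocation (a sketch; the script name and output path are placeholders):
#   python convert_x_clip_original_pytorch_to_hf.py --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32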
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)

            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )

        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
"""simple docstring"""
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(
        self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # boolean flag values assumed; the original literals were lost
            initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels,
        encoder_hidden_states, encoder_attention_mask,
    ):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False

    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(result.past_key_values)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx]))

    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        # Falcon can have different numbers of KV-heads than the number of query heads, so we need
        # to override this test to use the right head counts.
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim))
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        # The big models are way too big for the CI, so we use tiny random models that resemble their
        # architectures but with much smaller and fewer layers
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
"""simple docstring"""
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}


class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048


class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs


def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
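
# Example (a sketch): preprocessing a single poster image with the transforms above.
# The file name is a placeholder; any RGB image works.
def _demo_transforms():
    transform = get_image_transforms()
    image = Image.open("poster.jpg").convert("RGB")
    return transform(image)  # tensor of shape (3, 224, 224)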
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
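
# Example (a sketch): instantiating a smaller config; argument names follow the
# __init__ signature above.
def _demo_config():
    return RetriBertConfig(hidden_size=512, num_hidden_layers=4, projection_dim=64)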
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    """Rename a single key of the state dict in place."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Move the backbone weights under the HF convolutional-encoder prefix."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    """Scale the longer side to 800 (detection) or 1000 (structure recognition) pixels."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
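
# Sketch of the eval preprocessing above on a synthetic image, just to show the
# shapes involved (detection checkpoints scale the longer side to 800 pixels).
def _demo_preprocessing():
    image = Image.new("RGB", (1200, 900))
    pixel_values = normalize(resize(image, "detection")).unsqueeze(0)
    assert pixel_values.shape == (1, 3, 600, 800)  # 1200x900 -> 800x600 (width x height)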
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original weights into the HF Table Transformer structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18", mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1,
        bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]])
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]])
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
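
# Example invocation (a sketch; the script name and output path are placeholders):
#   python convert_table_transformer_checkpoint.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection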
INSTALL_CONTENT = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("p should not be less than 2!")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
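
# Worked example: for p = 5, m = 2**5 - 1 = 31 and the sequence runs
# s: 4 -> (16 - 2) % 31 = 14 -> (196 - 2) % 31 = 8 -> (64 - 2) % 31 = 0,
# so lucas_lehmer_test(5) returns True and 31 is a Mersenne prime.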
"""simple docstring"""
def solution(power: int = 1000) -> int:
    """Return the sum of the digits of 2**power."""
    n = 2**power
    r = 0
    while n:
        r, n = r + n % 10, n // 10
    return r
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
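
# Quick check: solution(15) == 26, since 2**15 = 32768 and 3 + 2 + 7 + 6 + 8 = 26.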
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Lazily yield primes using an incremental sieve (a map of composite -> prime factor)."""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Find the least odd index n such that 2 * prime_n * n exceeds the limit."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
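
# Sketch: the incremental sieve above yields primes lazily.
def _demo_sieve():
    from itertools import islice

    return list(islice(sieve(), 6))  # [2, 3, 5, 7, 11, 13]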
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None

    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in data_files."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
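

# Added example (not in the original file): a minimal sketch of exercising this
# builder through the public `datasets.load_dataset` API, which resolves the
# "csv" packaged module to the Csv class above. The file path is hypothetical.
if __name__ == "__main__":
    from datasets import load_dataset

    dataset = load_dataset("csv", data_files={"train": "train.csv"})
    print(dataset["train"].features)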
| 704
|
"""simple docstring"""
def heaps(arr: list) -> list:
    """
    Pure Python implementation of the iterative Heap's algorithm,
    returning all permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
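
# Added example (not in the original file): expected behavior on a tiny input.
# heaps([1, 2, 3]) yields all 3! = 6 permutations of the list, e.g.
#   [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]
assert len(heaps([1, 2, 3])) == 6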
| 536
| 0
|
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import (
        ImageTextPipelineOutput,
        UniDiffuserPipeline,
    )
else:
    from .modeling_text_decoder import UniDiffuserTextDecoder
    from .modeling_uvit import UniDiffuserModel, UTransformer2DModel
    from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
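

# Added example (not in the original file): a hedged sketch of text-to-image
# sampling with UniDiffuserPipeline. The checkpoint name "thu-ml/unidiffuser-v1"
# and the sampling settings are illustrative assumptions.
if __name__ == "__main__":
    import torch
    from diffusers import UniDiffuserPipeline

    pipe = UniDiffuserPipeline.from_pretrained("thu-ml/unidiffuser-v1", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")

    # UniDiffuser is a joint model; select the conditional text-to-image mode.
    pipe.set_text_to_image_mode()
    image = pipe(prompt="an astronaut riding a horse", num_inference_steps=20).images[0]
    image.save("unidiffuser_sample.png")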
| 24
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Convert rows of attribute values into one list per attribute (column)."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)

        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)

        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)

        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)

    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)

    return source_data
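

# Added example (not in the original file): three alternatives scored on two
# attributes, e.g. price to minimize (weight 0) and quality to maximize
# (weight 1). Each row gains a final column with its combined score.
if __name__ == "__main__":
    vehicles = [[20, 60], [31, 80], [35, 91]]
    weights = [0, 1]
    print(procentual_proximity(vehicles, weights))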
| 504
| 0
|
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
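
# Added example (not in the original file): a read-only dry run of the same
# selection logic that prints candidates instead of mutating them; useful when
# testing changes before granting the token write access.
def dry_run() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    for issue in repo.get_issues(state="open"):
        if (dt.utcnow() - issue.updated_at).days > 23 and not any(
            label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels()
        ):
            print(f"Would mark #{issue.number} as stale: {issue.title}")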
| 717
|
"""simple docstring"""
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # Ensure we can load the image processor from the feature extractor config
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()

            config_dict.pop("image_processor_type")
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "clip-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoImageProcessor.from_pretrained("clip-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )

        image_processor = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
        )
        self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, "NewImageProcessor")

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
                config_tmpfile = Path(tmpdirname) / "config.json"
                json.dump(
                    {"feature_extractor_type": "CLIPFeatureExtractor", "processor_class": "CLIPProcessor"},
                    open(processor_tmpfile, "w"),
                )
                json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(tmp_dir)
                    new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                    self.assertIsInstance(new_image_processor, CustomImageProcessor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained("hf-internal-testing/test_dynamic_image_processor")
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=False
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_image_processor", trust_remote_code=True
            )
            self.assertEqual(image_processor.__class__.__name__, "NewImageProcessor")
            self.assertTrue(not hasattr(image_processor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
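

# Added example (not in the original file): the registration pattern the tests
# above exercise, in plain-script form. CustomConfig/CustomImageProcessor are
# stand-ins for user-defined classes.
if __name__ == "__main__":
    AutoConfig.register("custom", CustomConfig)
    AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
    # From here on, AutoImageProcessor.from_pretrained(...) can resolve any
    # checkpoint whose config.json declares `"model_type": "custom"`.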
| 342
| 0
|
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<d>",
            "</d>",
            "<s>",
            "</s>",
            "</_>",
            "<unk>",
            "<pad>",
            "</n>",
            "我",
            "是",
            "C",
            "P",
            "M",
            "A",
            "n",
            "t",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    @tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
        texts = "今天天气真好!"
        jieba_tokens = ["今天", "天气", "真", "好", "!"]
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)
        normalized_text = "今天天气真好!"
        input_tokens = [tokenizer.bos_token] + tokens

        input_ids = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_ids)

        decoded_text = tokenizer.decode(input_ids)
        self.assertEqual(decoded_text, normalized_text)
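

# Added example (not in the original file): tokenizing free text with the same
# checkpoint outside the test harness. Requires `jieba` to be installed and
# downloads the "openbmb/cpm-ant-10b" tokenizer files.
if __name__ == "__main__":
    tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
    print(tokenizer.tokenize("今天天气真好!"))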
| 90
|
import flax.linen as nn
import jax.numpy as jnp

from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
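

# Added example (not in the original file): a minimal sketch of initializing
# and applying one of the blocks above on dummy data. These Flax blocks expect
# channels-last (NHWC) inputs; the shapes below are illustrative only.
if __name__ == "__main__":
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=32, num_layers=1)
    rng = jax.random.PRNGKey(0)
    sample = jnp.zeros((1, 16, 16, 32), dtype=jnp.float32)  # (batch, height, width, channels)
    temb = jnp.zeros((1, 128), dtype=jnp.float32)  # time embedding
    params = block.init(rng, sample, temb)
    hidden_states, output_states = block.apply(params, sample, temb)
    print(hidden_states.shape, len(output_states))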
| 335
| 0
|
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
    import torch
    from torch import nn

    from transformers import MaskFormerSwinBackbone
    from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])

        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)

    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="MaskFormerSwin is only used as an internal backbone")
    def test_save_load_fast_init_to_base(self):
        pass

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swin has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    @unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints")
    def test_model_from_pretrained(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin")
    def test_initialization(self):
        pass

    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif isinstance(tuple_object, Dict):
                    for tuple_iterable_value, dict_iterable_value in zip(
                        tuple_object.values(), dict_object.values()
                    ):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        torch.allclose(
                            set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                        ),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                            f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                            f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class MaskFormerSwinBackboneTest(unittest.TestCase, BackboneTesterMixin):
    all_model_classes = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    config_class = MaskFormerSwinConfig

    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)

    # Overridden because the backbone returns hidden states in a different format than the base class expects
    def test_backbone_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        batch_size = inputs_dict["pixel_values"].shape[0]

        for backbone_class in self.all_model_classes:
            backbone = backbone_class(config)
            backbone.to(torch_device)
            backbone.eval()

            outputs = backbone(**inputs_dict)

            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps, tuple)
            self.assertTrue(len(outputs.feature_maps) == len(backbone.channels))
            for feature_map, n_channels in zip(outputs.feature_maps, backbone.channels):
                self.assertTrue(feature_map.shape[:2], (batch_size, n_channels))
            self.assertIsNone(outputs.hidden_states)
            self.assertIsNone(outputs.attentions)

            # Test output_hidden_states=True
            outputs = backbone(**inputs_dict, output_hidden_states=True)
            self.assertIsNotNone(outputs.hidden_states)
            self.assertTrue(len(outputs.hidden_states), len(backbone.stage_names))
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:], backbone.channels):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    h_batch_size, _, h_n_channels = hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels), (batch_size, n_channels))

            # Test output_attentions=True
            if self.has_attentions:
                outputs = backbone(**inputs_dict, output_attentions=True)
                self.assertIsNotNone(outputs.attentions)
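

# Added example (not in the original file): a minimal sketch of running the
# backbone on random pixel values outside the test harness. The config values
# are illustrative defaults, not a pretrained configuration.
if __name__ == "__main__":
    config = MaskFormerSwinConfig(out_features=["stage1", "stage2", "stage3"])
    backbone = MaskFormerSwinBackbone(config)
    backbone.eval()
    pixel_values = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        outputs = backbone(pixel_values)
    for feature_map in outputs.feature_maps:
        print(feature_map.shape)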
| 643
|
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openmmlab/upernet-convnext-tiny",
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
_CONFIG_FOR_DOC = "UperNetConfig"


class UperNetConvModule(nn.Module):
    """
    A convolutional block that bundles conv/norm/activation layers.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        padding=0,
        bias=False,
        dilation=1,
    ):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            padding=padding,
            bias=bias,
            dilation=dilation,
        )
        self.batch_norm = nn.BatchNorm2d(out_channels)
        self.activation = nn.ReLU()

    def forward(self, input):
        output = self.conv(input)
        output = self.batch_norm(output)
        output = self.activation(output)

        return output


class UperNetPyramidPoolingBlock(nn.Module):
    def __init__(self, pool_scale, in_channels, channels):
        super().__init__()
        self.layers = [
            nn.AdaptiveAvgPool2d(pool_scale),
            UperNetConvModule(in_channels, channels, kernel_size=1),
        ]
        for i, layer in enumerate(self.layers):
            self.add_module(str(i), layer)

    def forward(self, input):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state


class UperNetPyramidPoolingModule(nn.Module):
    """
    Pyramid Pooling Module (PPM) used in PSPNet.
    """

    def __init__(self, pool_scales, in_channels, channels, align_corners):
        super().__init__()
        self.pool_scales = pool_scales
        self.align_corners = align_corners
        self.in_channels = in_channels
        self.channels = channels
        self.blocks = []
        for i, pool_scale in enumerate(pool_scales):
            block = UperNetPyramidPoolingBlock(pool_scale=pool_scale, in_channels=in_channels, channels=channels)
            self.blocks.append(block)
            self.add_module(str(i), block)

    def forward(self, x):
        ppm_outs = []
        for ppm in self.blocks:
            ppm_out = ppm(x)
            upsampled_ppm_out = nn.functional.interpolate(
                ppm_out, size=x.size()[2:], mode="bilinear", align_corners=self.align_corners
            )
            ppm_outs.append(upsampled_ppm_out)
        return ppm_outs


class UperNetHead(nn.Module):
    """
    Unified Perceptual Parsing decode head, combining a Pyramid Pooling Module
    with a Feature Pyramid Network on top of the backbone feature maps.
    """

    def __init__(self, config, in_channels):
        super().__init__()

        self.config = config
        self.pool_scales = config.pool_scales  # e.g. (1, 2, 3, 6)
        self.in_channels = in_channels
        self.channels = config.hidden_size
        self.align_corners = False
        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

        # PSP Module
        self.psp_modules = UperNetPyramidPoolingModule(
            self.pool_scales,
            self.in_channels[-1],
            self.channels,
            align_corners=self.align_corners,
        )
        self.bottleneck = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )
        # FPN Module
        self.lateral_convs = nn.ModuleList()
        self.fpn_convs = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lateral_conv = UperNetConvModule(in_channels, self.channels, kernel_size=1)
            fpn_conv = UperNetConvModule(self.channels, self.channels, kernel_size=3, padding=1)
            self.lateral_convs.append(lateral_conv)
            self.fpn_convs.append(fpn_conv)

        self.fpn_bottleneck = UperNetConvModule(
            len(self.in_channels) * self.channels,
            self.channels,
            kernel_size=3,
            padding=1,
        )

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def psp_forward(self, inputs):
        x = inputs[-1]
        psp_outs = [x]
        psp_outs.extend(self.psp_modules(x))
        psp_outs = torch.cat(psp_outs, dim=1)
        output = self.bottleneck(psp_outs)

        return output

    def forward(self, encoder_hidden_states):
        # build laterals
        laterals = [lateral_conv(encoder_hidden_states[i]) for i, lateral_conv in enumerate(self.lateral_convs)]

        laterals.append(self.psp_forward(encoder_hidden_states))

        # build top-down path
        used_backbone_levels = len(laterals)
        for i in range(used_backbone_levels - 1, 0, -1):
            prev_shape = laterals[i - 1].shape[2:]
            laterals[i - 1] = laterals[i - 1] + nn.functional.interpolate(
                laterals[i], size=prev_shape, mode="bilinear", align_corners=self.align_corners
            )

        # build outputs
        fpn_outs = [self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels - 1)]
        # append psp feature
        fpn_outs.append(laterals[-1])

        for i in range(used_backbone_levels - 1, 0, -1):
            fpn_outs[i] = nn.functional.interpolate(
                fpn_outs[i], size=fpn_outs[0].shape[2:], mode="bilinear", align_corners=self.align_corners
            )
        fpn_outs = torch.cat(fpn_outs, dim=1)
        output = self.fpn_bottleneck(fpn_outs)
        output = self.classifier(output)

        return output


class UperNetFCNHead(nn.Module):
    """
    Fully convolutional auxiliary head (FCNHead) used alongside the main decode head.
    """

    def __init__(self, config, in_index=2, kernel_size=3, dilation=1):
        super().__init__()

        self.config = config
        self.in_channels = config.auxiliary_in_channels
        self.channels = config.auxiliary_channels
        self.num_convs = config.auxiliary_num_convs
        self.concat_input = config.auxiliary_concat_input
        self.in_index = in_index

        conv_padding = (kernel_size // 2) * dilation
        convs = []
        convs.append(
            UperNetConvModule(
                self.in_channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
            )
        )
        for i in range(self.num_convs - 1):
            convs.append(
                UperNetConvModule(
                    self.channels, self.channels, kernel_size=kernel_size, padding=conv_padding, dilation=dilation
                )
            )
        if self.num_convs == 0:
            self.convs = nn.Identity()
        else:
            self.convs = nn.Sequential(*convs)
        if self.concat_input:
            self.conv_cat = UperNetConvModule(
                self.in_channels + self.channels, self.channels, kernel_size=kernel_size, padding=kernel_size // 2
            )

        self.classifier = nn.Conv2d(self.channels, config.num_labels, kernel_size=1)

    def init_weights(self):
        self.apply(self._init_weights)

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()

    def forward(self, encoder_hidden_states):
        # just take the relevant feature maps
        hidden_states = encoder_hidden_states[self.in_index]
        output = self.convs(hidden_states)
        if self.concat_input:
            output = self.conv_cat(torch.cat([hidden_states, output], dim=1))
        output = self.classifier(output)
        return output


class UperNetPreTrainedModel(PreTrainedModel):
    config_class = UperNetConfig
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, UperNetPreTrainedModel):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    def init_weights(self):
        """Initialize the weights"""
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, BackboneMixin):
            module.gradient_checkpointing = value


UPERNET_START_DOCSTRING = r"""
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
    config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

UPERNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""",
    UPERNET_START_DOCSTRING,
)
class UperNetForSemanticSegmentation(UperNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.backbone = AutoBackbone.from_config(config.backbone_config)

        # Semantic segmentation head(s)
        self.decode_head = UperNetHead(config, in_channels=self.backbone.channels)
        self.auxiliary_head = UperNetFCNHead(config) if config.use_auxiliary_head else None

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=SemanticSegmenterOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values=None,
        output_attentions=None,
        output_hidden_states=None,
        labels=None,
        return_dict=None,
    ):
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*):
            Ground truth semantic segmentation maps for computing the loss. Indices should be in `[0, ...,
            config.num_labels - 1]`.

        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        outputs = self.backbone.forward_with_filtered_kwargs(
            pixel_values, output_hidden_states=output_hidden_states, output_attentions=output_attentions
        )
        features = outputs.feature_maps

        logits = self.decode_head(features)
        logits = nn.functional.interpolate(logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False)

        auxiliary_logits = None
        if self.auxiliary_head is not None:
            auxiliary_logits = self.auxiliary_head(features)
            auxiliary_logits = nn.functional.interpolate(
                auxiliary_logits, size=pixel_values.shape[2:], mode="bilinear", align_corners=False
            )

        loss = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one")
            else:
                # compute weighted loss
                loss_fct = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index)
                main_loss = loss_fct(logits, labels)
                auxiliary_loss = loss_fct(auxiliary_logits, labels)
                loss = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss

        if not return_dict:
            if output_hidden_states:
                output = (logits,) + outputs[1:]
            else:
                output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SemanticSegmenterOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
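

# Added example (not in the original file): a minimal inference sketch with the
# checkpoint referenced at the top of this module. The local image path is a
# hypothetical placeholder; preprocessing follows the usual AutoImageProcessor flow.
if __name__ == "__main__":
    from PIL import Image
    from transformers import AutoImageProcessor, UperNetForSemanticSegmentation

    processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
    model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny")

    image = Image.open("scene.jpg")  # hypothetical local image
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch, num_labels, height, width)
    print(logits.shape)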
| 643
| 1
|
"""simple docstring"""
def palindromic_string(input_string: str) -> str:
    """
    Manacher's algorithm: find the longest palindromic substring in linear time.
    """
    max_length = 0

    # if input_string is "aba" than new_input_string become "a|b|a"
    new_input_string = ""
    output_string = ""

    # append each character + "|" in new_string for range(0, length-1)
    for i in input_string[: len(input_string) - 1]:
        new_input_string += i + "|"
    # append last character
    new_input_string += input_string[-1]

    # we will store the starting and ending of previous furthest ending palindromic
    # substring
    l, r = 0, 0  # noqa: E741

    # length[i] shows the length of palindromic substring with center i
    length = [1 for i in range(len(new_input_string))]

    # for each character in new_string find corresponding palindromic string
    start = 0
    for j in range(len(new_input_string)):
        k = 1 if j > r else min(length[l + r - j] // 2, r - j + 1)
        while (
            j - k >= 0
            and j + k < len(new_input_string)
            and new_input_string[k + j] == new_input_string[j - k]
        ):
            k += 1

        length[j] = 2 * k - 1

        # does this string is ending after the previously explored end (that is r) ?
        # if yes the update the new r to the last index of this
        if j + k - 1 > r:
            l = j - k + 1  # noqa: E741
            r = j + k - 1

        # update max_length and start position
        if max_length < length[j]:
            max_length = length[j]
            start = j

    # create that string
    s = new_input_string[start - max_length // 2 : start + max_length // 2 + 1]
    for i in s:
        if i != "|":
            output_string += i

    return output_string


if __name__ == "__main__":
    import doctest

    doctest.testmod()
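
# Added example (not in the original file): the classic worked case, where the
# longest palindromic substring of "forgeeksskeegfor" is "geeksskeeg".
if __name__ == "__main__":
    assert palindromic_string("forgeeksskeegfor") == "geeksskeeg"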
| 7
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCamelCase_: Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCamelCase_: List[str] = DisjunctiveConstraint(snake_case_ )
self.assertTrue(isinstance(dc.token_ids , snake_case_ ) )
with self.assertRaises(snake_case_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(snake_case_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCAmelCase__ ( self : Optional[Any] ):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2], which fulfills it, while still being in the middle of
        # potentially fulfilling [1,2,3,4]. If we count [1,2,3] as fulfilling the constraint, then the
        # algorithm can never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCamelCase_: Dict = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(snake_case_ ):
DisjunctiveConstraint(snake_case_ ) # fails here
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = [[1, 2, 3], [1, 2, 4]]
UpperCamelCase_: Dict = DisjunctiveConstraint(snake_case_ )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: str = dc.update(1 )
UpperCamelCase_: Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(snake_case_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = dc.update(2 )
UpperCamelCase_: Tuple = stepped is True and completed is False and reset is False
self.assertTrue(snake_case_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = dc.update(3 )
UpperCamelCase_: Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(snake_case_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCamelCase_: Tuple = DisjunctiveConstraint(snake_case_ )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Tuple = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
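# A minimal sketch of driving DisjunctiveConstraint by hand, outside unittest
# (same API as the tests above exercise; the token ids are made up):
#
#     dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#     for token in (1, 2, 4):
#         stepped, completed, reset = dc.update(token)
#     assert completed and dc.current_seq == [1, 2, 4]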
from ....utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
class MMBTConfig:
    """Wraps a base model config and adds the multimodal hidden size (and, optionally, the number of labels)."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
from transformers import DistilBertTokenizer, DistilBertTokenizerFast
from transformers.testing_utils import require_tokenizers, slow
from ..bert.test_tokenization_bert import BertTokenizationTest
@require_tokenizers
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = DistilBertTokenizer
lowerCamelCase_ = DistilBertTokenizerFast
lowerCamelCase_ = True
@slow
def lowerCAmelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = DistilBertTokenizer.from_pretrained('distilbert-base-uncased' )
A_ : Tuple = tokenizer.encode('sequence builders' , add_special_tokens=lowercase )
A_ : List[Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=lowercase )
A_ : str = tokenizer.build_inputs_with_special_tokens(lowercase )
A_ : Tuple = tokenizer.build_inputs_with_special_tokens(lowercase , lowercase )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
snake_case = """src/transformers"""
snake_case = """docs/source/en"""
snake_case = """."""
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
with open(__UpperCamelCase , "r" , encoding="utf-8" , newline="\n" ) as f:
_lowerCAmelCase : Union[str, Any] = f.readlines()
# Find the start prompt.
_lowerCAmelCase : List[str] = 0
while not lines[start_index].startswith(__UpperCamelCase ):
start_index += 1
start_index += 1
_lowerCAmelCase : Any = start_index
while not lines[end_index].startswith(__UpperCamelCase ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
snake_case = """Model|Encoder|Decoder|ForConditionalGeneration"""
# Regexes that match TF/Flax/PT model names.
snake_case = re.compile(r"TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
snake_case = re.compile(r"Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# Will match any TF or Flax model too so need to be in an else branch after the two previous regexes.
snake_case = re.compile(r"(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)")
# This is to make sure the transformers module imported is the one in the repo.
snake_case = direct_transformers_import(TRANSFORMERS_PATH)
def UpperCamelCase_ ( lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : List[Any] = re.finditer(".+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)" , __UpperCamelCase )
return [m.group(0 ) for m in matches]
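# For reference, the splitter above breaks camel-cased model names into words,
# e.g. (hand-checked against the regex):
#
#     camel_case_split("TFBertModel")  ->  ["TF", "Bert", "Model"]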
def UpperCamelCase_ ( lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
_lowerCAmelCase : int = 2 if text == '''✅''' or text == '''❌''' else len(__UpperCamelCase )
_lowerCAmelCase : int = (width - text_length) // 2
_lowerCAmelCase : Optional[Any] = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
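# `_center_text` pads a cell to a fixed width, counting the ✅/❌ marks as width 2,
# e.g. (hand-checked):
#
#     _center_text("✅", 10)  ->  "    ✅    "  (4 spaces either side)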
def UpperCamelCase_ ( ):
"""simple docstring"""
_lowerCAmelCase : Union[str, Any] = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
_lowerCAmelCase : Dict = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
_lowerCAmelCase : Union[str, Any] = {name: config.replace("Config" , "" ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
_lowerCAmelCase : Tuple = collections.defaultdict(__UpperCamelCase )
_lowerCAmelCase : List[str] = collections.defaultdict(__UpperCamelCase )
_lowerCAmelCase : Tuple = collections.defaultdict(__UpperCamelCase )
_lowerCAmelCase : Optional[int] = collections.defaultdict(__UpperCamelCase )
_lowerCAmelCase : Union[str, Any] = collections.defaultdict(__UpperCamelCase )
# Let's lookup through all transformers object (once).
for attr_name in dir(__UpperCamelCase ):
_lowerCAmelCase : Any = None
if attr_name.endswith("Tokenizer" ):
_lowerCAmelCase : Union[str, Any] = slow_tokenizers
_lowerCAmelCase : int = attr_name[:-9]
elif attr_name.endswith("TokenizerFast" ):
_lowerCAmelCase : Dict = fast_tokenizers
_lowerCAmelCase : Optional[Any] = attr_name[:-13]
elif _re_tf_models.match(__UpperCamelCase ) is not None:
_lowerCAmelCase : Union[str, Any] = tf_models
_lowerCAmelCase : Optional[Any] = _re_tf_models.match(__UpperCamelCase ).groups()[0]
elif _re_flax_models.match(__UpperCamelCase ) is not None:
_lowerCAmelCase : Any = flax_models
_lowerCAmelCase : List[str] = _re_flax_models.match(__UpperCamelCase ).groups()[0]
elif _re_pt_models.match(__UpperCamelCase ) is not None:
_lowerCAmelCase : Any = pt_models
_lowerCAmelCase : Optional[int] = _re_pt_models.match(__UpperCamelCase ).groups()[0]
if lookup_dict is not None:
while len(__UpperCamelCase ) > 0:
if attr_name in model_name_to_prefix.values():
_lowerCAmelCase : List[Any] = True
break
# Try again after removing the last word in the name
_lowerCAmelCase : Optional[Any] = ''''''.join(camel_case_split(__UpperCamelCase )[:-1] )
# Let's build that table!
_lowerCAmelCase : Optional[Any] = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
_lowerCAmelCase : Tuple = ['''Model''', '''Tokenizer slow''', '''Tokenizer fast''', '''PyTorch support''', '''TensorFlow support''', '''Flax Support''']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
_lowerCAmelCase : Optional[int] = [len(__UpperCamelCase ) + 2 for c in columns]
_lowerCAmelCase : str = max([len(__UpperCamelCase ) for name in model_names] ) + 2
# Build the table per se
_lowerCAmelCase : Optional[Any] = '''|''' + '''|'''.join([_center_text(__UpperCamelCase , __UpperCamelCase ) for c, w in zip(__UpperCamelCase , __UpperCamelCase )] ) + '''|\n'''
    # Use ":-----:" format to center-align table cell text
table += "|" + "|".join([":" + "-" * (w - 2) + ":" for w in widths] ) + "|\n"
_lowerCAmelCase : List[Any] = {True: '''✅''', False: '''❌'''}
for name in model_names:
_lowerCAmelCase : Union[str, Any] = model_name_to_prefix[name]
_lowerCAmelCase : Any = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(__UpperCamelCase , __UpperCamelCase ) for l, w in zip(__UpperCamelCase , __UpperCamelCase )] ) + "|\n"
return table
def UpperCamelCase_ ( lowerCAmelCase__=False ):
"""simple docstring"""
_lowerCAmelCase : Tuple = _find_text_in_file(
filename=os.path.join(__UpperCamelCase , "index.md" ) , start_prompt="<!--This table is updated automatically from the auto modules" , end_prompt="<!-- End table-->" , )
_lowerCAmelCase : Optional[int] = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(__UpperCamelCase , "index.md" ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
"The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this." )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
snake_case = parser.parse_args()
check_model_table(args.fix_and_overwrite)
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
A__ : Any = logging.get_logger(__name__)
class lowercase ( __UpperCamelCase ):
__a = ["""pixel_values"""]
def __init__( self , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = 1 / 255 , SCREAMING_SNAKE_CASE__ = True , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Union[str, Any] = size if size is not None else {'''height''': 384, '''width''': 384}
lowerCAmelCase__ : Optional[Any] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : Dict = do_resize
lowerCAmelCase__ : List[Any] = size
lowerCAmelCase__ : Dict = resample
lowerCAmelCase__ : Any = do_rescale
lowerCAmelCase__ : str = rescale_factor
lowerCAmelCase__ : Union[str, Any] = do_normalize
lowerCAmelCase__ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase__ : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase__ : Union[str, Any] = do_convert_rgb
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
lowerCAmelCase__ : Any = (size['''height'''], size['''width'''])
return resize(SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , data_format=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def lowercase_ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
lowerCAmelCase__ : Tuple = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : List[str] = resample if resample is not None else self.resample
lowerCAmelCase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : Dict = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : Union[str, Any] = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ : Optional[int] = size if size is not None else self.size
lowerCAmelCase__ : List[str] = get_size_dict(SCREAMING_SNAKE_CASE__ , default_to_square=SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ : List[Any] = make_list_of_images(SCREAMING_SNAKE_CASE__ )
if not valid_images(SCREAMING_SNAKE_CASE__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ : Dict = [convert_to_rgb(SCREAMING_SNAKE_CASE__ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ : List[Any] = [to_numpy_array(SCREAMING_SNAKE_CASE__ ) for image in images]
if do_resize:
lowerCAmelCase__ : Dict = [self.resize(image=SCREAMING_SNAKE_CASE__ , size=SCREAMING_SNAKE_CASE__ , resample=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_rescale:
lowerCAmelCase__ : List[Any] = [self.rescale(image=SCREAMING_SNAKE_CASE__ , scale=SCREAMING_SNAKE_CASE__ ) for image in images]
if do_normalize:
lowerCAmelCase__ : Union[str, Any] = [self.normalize(image=SCREAMING_SNAKE_CASE__ , mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ ) for image in images]
lowerCAmelCase__ : Optional[int] = [to_channel_dimension_format(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for image in images]
lowerCAmelCase__ : Union[str, Any] = BatchFeature(data={'''pixel_values''': images} , tensor_type=SCREAMING_SNAKE_CASE__ )
return encoded_outputs
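# A minimal usage sketch (hedged: the class and its preprocessing method carry
# obfuscated names in this dump, but it behaves like a standard BLIP-style
# image processor; the names below are assumptions):
#
#     from PIL import Image
#     processor = BlipImageProcessor(size={"height": 384, "width": 384})
#     batch = processor(images=Image.new("RGB", (640, 480)), return_tensors="np")
#     batch["pixel_values"].shape  ->  (1, 3, 384, 384)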
"""simple docstring"""
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowerCAmelCase__ )
class _UpperCamelCase ( lowerCAmelCase__ ):
'''simple docstring'''
__UpperCAmelCase : str =field(default="""automatic-speech-recognition""" ,metadata={"""include_in_asdict_even_if_is_default""": True} )
__UpperCAmelCase : ClassVar[Features] =Features({"""audio""": Audio()} )
__UpperCAmelCase : ClassVar[Features] =Features({"""transcription""": Value("""string""" )} )
__UpperCAmelCase : str ="audio"
__UpperCAmelCase : str ="transcription"
def snake_case ( self , __a ):
if self.audio_column not in features:
raise ValueError(f"Column {self.audio_column} is not present in features." )
if not isinstance(features[self.audio_column] , __a ):
raise ValueError(f"Column {self.audio_column} is not an Audio type." )
__lowerCAmelCase = copy.deepcopy(self )
__lowerCAmelCase = self.input_schema.copy()
__lowerCAmelCase = features[self.audio_column]
__lowerCAmelCase = input_schema
return task_template
@property
def snake_case ( self ):
return {self.audio_column: "audio", self.transcription_column: "transcription"}
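# A sketch of how this template maps user columns onto the canonical schema
# (in `datasets` this class is `AutomaticSpeechRecognition`; column names made up):
#
#     template = AutomaticSpeechRecognition(audio_column="file", transcription_column="text")
#     template.column_mapping  ->  {"file": "audio", "text": "transcription"}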
"""simple docstring"""
import string
def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    """Compare the two implementations above on the printable characters."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
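# A quick worked example of the cipher above (hand-checked: A<->Z, B<->Y, ...):
#
#     >>> atbash("Hello")
#     'Svool'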
'''simple docstring'''
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)
    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[
                0
            ]
            encrypted_batch = "".join(
                self.replace_digits(num) for num in batch_encrypted
            )
            encrypted += encrypted_batch
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        # scale the float inverse by det * det_inv so it rounds to an integer matrix mod 36
        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(
                self.replace_digits(num) for num in batch_decrypted
            )
            decrypted += decrypted_batch
        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []
    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)
    hc = HillCipher(numpy.array(hill_matrix))
    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
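# A quick worked example of HillCipher (hand-checked with the mod-36 arithmetic:
# "hello" pads to "HELLOO", then encrypts in batches HE / LL / OO):
#
#     >>> hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
#     >>> hc.encrypt("hello")
#     '85FF00'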
'''simple docstring'''
from __future__ import annotations
from typing import Any
class Matrix:
    """A simple dense matrix with row/column indexing and arithmetic operators."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"
        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column
        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Sherman-Morrison formula: given A^(-1) (here, `self`), compute
        # (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u)
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector
        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
# Testing
if __name__ == "__main__":
    def test1() -> None:
        # a^(-1) starts as the 3x3 identity matrix
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
"""simple docstring"""
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def snake_case__ ( self ) -> Optional[int]:
A__ = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
        A__ = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.int32 , )  # "J'aime le camembert !" ("I love camembert!")
A__ = model(snake_case_ )['''last_hidden_state''']
A__ = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , snake_case_ )
# compare the actual values for a slice.
        A__ = tf.convert_to_tensor(
            [[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.float32 , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
_A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
A : Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class lowerCamelCase ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , *__snake_case : List[Any] , **__snake_case : int ):
'''simple docstring'''
super().__init__(*__snake_case , **__snake_case )
requires_backends(self , 'vision' )
self.check_model_type(__snake_case )
def __call__( self : Optional[Any] , __snake_case : Union[str, List[str], "Image.Image", List["Image.Image"]] , **__snake_case : Optional[Any] ):
'''simple docstring'''
return super().__call__(__snake_case , **__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **__snake_case : List[Any] ):
'''simple docstring'''
return {}, {}, {}
def SCREAMING_SNAKE_CASE_ ( self : Dict , __snake_case : Union[str, Any] ):
'''simple docstring'''
_snake_case: int = load_image(__snake_case )
_snake_case: Any = image.size
_snake_case: Optional[Any] = self.image_processor(images=__snake_case , return_tensors=self.framework )
return model_inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , __snake_case : Tuple ):
'''simple docstring'''
_snake_case: List[str] = self.model(**__snake_case )
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , __snake_case : Optional[Any] ):
'''simple docstring'''
_snake_case: List[str] = model_outputs.predicted_depth
_snake_case: str = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=__snake_case )
_snake_case: Optional[Any] = prediction.squeeze().cpu().numpy()
_snake_case: Optional[Any] = (output * 2_55 / np.max(__snake_case )).astype('uint8' )
_snake_case: Optional[Any] = Image.fromarray(__snake_case )
_snake_case: Any = {}
_snake_case: Dict = predicted_depth
_snake_case: Any = depth
return output_dict
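# A minimal usage sketch (this is the depth-estimation pipeline; the checkpoint
# below is one commonly used with it, not taken from this file):
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("photo.jpg")
#     result["depth"]            # PIL.Image visualisation
#     result["predicted_depth"]  # raw torch tensor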
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
A : Optional[Any] = tuple[int, int]
class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]) -> None:
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> Graph:
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                # XOR: exactly one endpoint is already in the subgraph
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(network_file) as f:
        data = f.read().strip().split('\n')

    adjacency_matrix = [line.split(',') for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
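# A tiny worked example of the Prim routine above (hand-checked): starting from
# vertex 0, the MST keeps edges (0, 1) and (1, 2) for a total weight of 3.
#
#     g = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 3})
#     mst = g.prims_algorithm()
#     sum(mst.edges.values())  ->  3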
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase__( UpperCAmelCase ):
"""simple docstring"""
@staticmethod
@abstractmethod
def _lowercase ( SCREAMING_SNAKE_CASE_ : ArgumentParser ) -> Optional[int]:
raise NotImplementedError()
@abstractmethod
def _lowercase ( self : List[Any] ) -> List[Any]:
raise NotImplementedError()
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class lowercase__:
"""simple docstring"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : str=1_3 , SCREAMING_SNAKE_CASE_ : Optional[Any]=7 , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Tuple=True , SCREAMING_SNAKE_CASE_ : Tuple=9_9 , SCREAMING_SNAKE_CASE_ : List[str]=3_2 , SCREAMING_SNAKE_CASE_ : int=2 , SCREAMING_SNAKE_CASE_ : Tuple=4 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_7 , SCREAMING_SNAKE_CASE_ : Union[str, Any]="gelu" , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=0.1 , SCREAMING_SNAKE_CASE_ : Optional[int]=5_1_2 , SCREAMING_SNAKE_CASE_ : str=1_6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : List[Any]=0.02 , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : List[Any]=True , SCREAMING_SNAKE_CASE_ : Union[str, Any]="None" , SCREAMING_SNAKE_CASE_ : Optional[Any]=3 , SCREAMING_SNAKE_CASE_ : List[Any]=4 , SCREAMING_SNAKE_CASE_ : str=None , ) -> str:
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = seq_length
lowercase_ = is_training
lowercase_ = use_input_mask
lowercase_ = use_token_type_ids
lowercase_ = use_labels
lowercase_ = vocab_size
lowercase_ = hidden_size
lowercase_ = num_hidden_layers
lowercase_ = num_attention_heads
lowercase_ = intermediate_size
lowercase_ = hidden_act
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = max_position_embeddings
lowercase_ = type_vocab_size
lowercase_ = type_sequence_label_size
lowercase_ = initializer_range
lowercase_ = num_labels
lowercase_ = num_choices
lowercase_ = relative_attention
lowercase_ = position_biased_input
lowercase_ = pos_att_type
lowercase_ = scope
def _lowercase ( self : List[Any] ) -> List[Any]:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase_ = None
if self.use_input_mask:
lowercase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase_ = None
if self.use_token_type_ids:
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase_ = None
lowercase_ = None
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase_ = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=SCREAMING_SNAKE_CASE_ , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> List[Any]:
lowercase_ = TFDebertaVaModel(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase_ = [input_ids, input_mask]
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Tuple , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Any ) -> Union[str, Any]:
lowercase_ = TFDebertaVaForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] ) -> List[str]:
lowercase_ = self.num_labels
lowercase_ = TFDebertaVaForSequenceClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]:
lowercase_ = self.num_labels
lowercase_ = TFDebertaVaForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> str:
lowercase_ = TFDebertaVaForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
lowercase_ = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase_ = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : List[str] ) -> Optional[int]:
lowercase_ = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
lowercase_ = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a :str = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
a :int = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
a :int = False
a :Any = False
def _lowercase ( self : Dict ) -> str:
lowercase_ = TFDebertaVaModelTester(self )
lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def _lowercase ( self : Tuple ) -> Dict:
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ) -> int:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Dict ) -> List[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Any ) -> str:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*SCREAMING_SNAKE_CASE_ )
@slow
def _lowercase ( self : Tuple ) -> Dict:
lowercase_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_tf
class lowercase__( unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason='''Model not available yet''' )
def _lowercase ( self : Optional[Any] ) -> Union[str, Any]:
pass
@slow
def _lowercase ( self : List[Any] ) -> Dict:
lowercase_ = TFDebertaVaModel.from_pretrained('''kamalkraj/deberta-v2-xlarge''' )
lowercase_ = tf.constant([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
lowercase_ = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase_ = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowercase_ = tf.constant(
[[[0.23_56, 0.19_48, 0.03_69], [-0.10_63, 0.35_86, -0.51_52], [-0.63_99, -0.02_59, -0.25_25]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
_lowerCamelCase ="\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class a_ ( unittest.TestCase , lowerCamelCase_ ):
"""simple docstring"""
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =load_tool('text-question-answering' )
self.tool.setup()
SCREAMING_SNAKE_CASE =load_tool('text-question-answering' ,remote=snake_case )
def _lowerCAmelCase ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE =self.tool(snake_case ,'What did Hugging Face do in April 2021?' )
self.assertEqual(snake_case ,'launched the BigScience Research Workshop' )
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =self.remote_tool(snake_case ,'What did Hugging Face do in April 2021?' )
self.assertEqual(snake_case ,'launched the BigScience Research Workshop' )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.tool(text=snake_case ,question='What did Hugging Face do in April 2021?' )
self.assertEqual(snake_case ,'launched the BigScience Research Workshop' )
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE =self.remote_tool(text=snake_case ,question='What did Hugging Face do in April 2021?' )
self.assertEqual(snake_case ,'launched the BigScience Research Workshop' )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase =logging.get_logger(__name__)
_lowerCamelCase ={
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}
class a_ ( lowerCamelCase_ ):
"""simple docstring"""
__UpperCAmelCase = 'lxmert'
__UpperCAmelCase = {}
def __init__( self : int ,snake_case : List[Any]=30522 ,snake_case : str=768 ,snake_case : Union[str, Any]=12 ,snake_case : int=9500 ,snake_case : Any=1600 ,snake_case : Union[str, Any]=400 ,snake_case : int=3072 ,snake_case : Any="gelu" ,snake_case : Any=0.1 ,snake_case : int=0.1 ,snake_case : Optional[int]=512 ,snake_case : int=2 ,snake_case : Dict=0.02 ,snake_case : List[Any]=1e-12 ,snake_case : List[str]=9 ,snake_case : Tuple=5 ,snake_case : Tuple=5 ,snake_case : List[Any]=2048 ,snake_case : Union[str, Any]=4 ,snake_case : Any=6.67 ,snake_case : Dict=True ,snake_case : Union[str, Any]=True ,snake_case : Union[str, Any]=True ,snake_case : Dict=True ,snake_case : str=True ,snake_case : int=True ,snake_case : Any=True ,**snake_case : Dict ,):
SCREAMING_SNAKE_CASE =vocab_size
SCREAMING_SNAKE_CASE =hidden_size
SCREAMING_SNAKE_CASE =num_attention_heads
SCREAMING_SNAKE_CASE =hidden_act
SCREAMING_SNAKE_CASE =intermediate_size
SCREAMING_SNAKE_CASE =hidden_dropout_prob
SCREAMING_SNAKE_CASE =attention_probs_dropout_prob
SCREAMING_SNAKE_CASE =max_position_embeddings
SCREAMING_SNAKE_CASE =type_vocab_size
SCREAMING_SNAKE_CASE =initializer_range
SCREAMING_SNAKE_CASE =layer_norm_eps
SCREAMING_SNAKE_CASE =num_qa_labels
SCREAMING_SNAKE_CASE =num_object_labels
SCREAMING_SNAKE_CASE =num_attr_labels
SCREAMING_SNAKE_CASE =l_layers
SCREAMING_SNAKE_CASE =x_layers
SCREAMING_SNAKE_CASE =r_layers
SCREAMING_SNAKE_CASE =visual_feat_dim
SCREAMING_SNAKE_CASE =visual_pos_dim
SCREAMING_SNAKE_CASE =visual_loss_normalizer
SCREAMING_SNAKE_CASE =task_matched
SCREAMING_SNAKE_CASE =task_mask_lm
SCREAMING_SNAKE_CASE =task_obj_predict
SCREAMING_SNAKE_CASE =task_qa
SCREAMING_SNAKE_CASE =visual_obj_loss
SCREAMING_SNAKE_CASE =visual_attr_loss
SCREAMING_SNAKE_CASE =visual_feat_loss
SCREAMING_SNAKE_CASE ={'vision': r_layers, 'cross_encoder': x_layers, 'language': l_layers}
super().__init__(**snake_case )
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )
        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)
        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=RIGHT, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
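# To render this scene with manim's CLI (an illustrative sketch; the file name and
# quality flags are assumptions, not part of the original source):
#   manim -pql stage_1.py Stage1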
| 55
|
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler) -> None:
        super().__init__()
        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample
            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
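# Minimal usage sketch (illustrative; assumes a trained `unet` and any scheduler whose
# config DDIMScheduler can consume, e.g. from a DDPM checkpoint):
#   pipeline = DDIMPipeline(unet=unet, scheduler=scheduler)
#   images = pipeline(batch_size=4, num_inference_steps=50, eta=0.0).images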
| 198
| 0
|
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
A_ = logging.get_logger(__name__)
A_ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class DebertaV2Config(PretrainedConfig):
    model_type = "deberta-v2"

    def __init__(
        self,
        vocab_size=128100,
        hidden_size=1536,
        num_hidden_layers=24,
        num_attention_heads=24,
        intermediate_size=6144,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=0,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        relative_attention=False,
        max_relative_positions=-1,
        pad_token_id=0,
        position_biased_input=True,
        pos_att_type=None,
        pooler_dropout=0,
        pooler_hidden_act="gelu",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.relative_attention = relative_attention
        self.max_relative_positions = max_relative_positions
        self.pad_token_id = pad_token_id
        self.position_biased_input = position_biased_input
        # Backwards compatibility
        if isinstance(pos_att_type, str):
            pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
        self.pos_att_type = pos_att_type
        self.vocab_size = vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.pooler_hidden_size = kwargs.get("pooler_hidden_size", hidden_size)
        self.pooler_dropout = pooler_dropout
        self.pooler_hidden_act = pooler_hidden_act


class DebertaV2OnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)]
            )
        else:
            return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
        tokenizer: "PreTrainedTokenizerBase" = None,
    ) -> Mapping[str, Any]:
        dummy_inputs = super().generate_dummy_inputs(preprocessor=preprocessor, framework=framework)
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
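# Usage sketch (illustrative; the constructor arguments are assumptions about the
# OnnxConfig API): with the default type_vocab_size=0, token_type_ids is dropped:
#   onnx_config = DebertaV2OnnxConfig(DebertaV2Config())
#   dummy = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   sorted(dummy)  # ['attention_mask', 'input_ids']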
| 712
|
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
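        # Worked example with the defaults above: (16 - 2) // 2 + 1 = 8 frequency patches
        # and (24 - 2) // 2 + 1 = 12 time patches, so 8 * 12 + 2 = 98 tokens per clip.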
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size, max_length=self.max_length, num_mel_bins=self.num_mel_bins, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, frequency_stride=self.frequency_stride, time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by ModelTesterMixin (names restored on a best-effort basis)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_headmasking = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate


@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )

    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
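# End-to-end inference mirrors the test above (an illustrative sketch; `waveform` is
# assumed to be a 1-D float array at the extractor's expected sampling rate):
#   fe = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
#   inputs = fe(waveform, sampling_rate=sampling_rate, return_tensors="pt")
#   predicted_class = model(**inputs).logits.argmax(-1).item()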
| 384
| 0
|
'''simple docstring'''
def stooge_sort(arr: list) -> list:
    stooge(arr, 0, len(arr) - 1)
    return arr


def stooge(arr: list, i: int, h: int) -> None:
    if i >= h:
        return
    # If the first element is larger than the last, swap them
    if arr[i] > arr[h]:
        arr[i], arr[h] = arr[h], arr[i]
    # If there are more than 2 elements in the array
    if h - i + 1 > 2:
        t = (h - i + 1) // 3
        # Recursively sort the first 2/3 of the elements
        stooge(arr, i, h - t)
        # Recursively sort the last 2/3 of the elements
        stooge(arr, i + t, h)
        # Recursively sort the first 2/3 of the elements again
        stooge(arr, i, h - t)
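# Worked example (illustrative): on [2, 4, 5, 3, 1] the top-level call swaps the ends,
# computes t = 5 // 3 = 1, then sorts indices 0..3, 1..4, and 0..3 again, giving
# stooge_sort([2, 4, 5, 3, 1]) == [1, 2, 3, 4, 5].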
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(stooge_sort(unsorted))
| 369
|
'''simple docstring'''
from __future__ import annotations
ELECTRON_CHARGE = 1.6021e-19  # units = C
def electric_conductivity(conductivity: float, electron_conc: float, mobility: float) -> tuple[str, float]:
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("""You cannot supply more or less than 2 values""" )
elif conductivity < 0:
raise ValueError("""Conductivity cannot be negative""" )
elif electron_conc < 0:
raise ValueError("""Electron concentration cannot be negative""" )
elif mobility < 0:
raise ValueError("""mobility cannot be negative""" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
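# Worked example: passing 0 for the unknown quantity, electric_conductivity(1000, 1e20, 0)
# solves sigma = n * e * mu for mobility: 1000 / (1e20 * 1.6021e-19) ≈ 62.42,
# so the function returns ("mobility", 62.42...).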
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369
| 1
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MT5IntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
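# Note on the score: the model returns the mean per-token cross-entropy, so multiplying
# by the label length and negating yields the summed log-likelihood; the expected value
# appears to come from the reference Mesh TensorFlow implementation (hence `mtf_score`).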
| 682
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
def _a ( self ) -> Any:
# with apply_OCR = True
__UpperCamelCase =LayoutLMvaImageProcessor()
from datasets import load_dataset
__UpperCamelCase =load_dataset('hf-internal-testing/fixtures_docvqa' , split='test' )
__UpperCamelCase =Image.open(ds[0]['file'] ).convert('RGB' )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCamelCase =[['11:14', 'to', '11:39', 'a.m', '11:39', 'to', '11:44', 'a.m.', '11:44', 'a.m.', 'to', '12:25', 'p.m.', '12:25', 'to', '12:58', 'p.m.', '12:58', 'to', '4:00', 'p.m.', '2:00', 'to', '5:00', 'p.m.', 'Coffee', 'Break', 'Coffee', 'will', 'be', 'served', 'for', 'men', 'and', 'women', 'in', 'the', 'lobby', 'adjacent', 'to', 'exhibit', 'area.', 'Please', 'move', 'into', 'exhibit', 'area.', '(Exhibits', 'Open)', 'TRRF', 'GENERAL', 'SESSION', '(PART', '|)', 'Presiding:', 'Lee', 'A.', 'Waller', 'TRRF', 'Vice', 'President', '“Introductory', 'Remarks”', 'Lee', 'A.', 'Waller,', 'TRRF', 'Vice', 'Presi-', 'dent', 'Individual', 'Interviews', 'with', 'TRRF', 'Public', 'Board', 'Members', 'and', 'Sci-', 'entific', 'Advisory', 'Council', 'Mem-', 'bers', 'Conducted', 'by', 'TRRF', 'Treasurer', 'Philip', 'G.', 'Kuehn', 'to', 'get', 'answers', 'which', 'the', 'public', 'refrigerated', 'warehousing', 'industry', 'is', 'looking', 'for.', 'Plus', 'questions', 'from', 'the', 'floor.', 'Dr.', 'Emil', 'M.', 'Mrak,', 'University', 'of', 'Cal-', 'ifornia,', 'Chairman,', 'TRRF', 'Board;', 'Sam', 'R.', 'Cecil,', 'University', 'of', 'Georgia', 'College', 'of', 'Agriculture;', 'Dr.', 'Stanley', 'Charm,', 'Tufts', 'University', 'School', 'of', 'Medicine;', 'Dr.', 'Robert', 'H.', 'Cotton,', 'ITT', 'Continental', 'Baking', 'Company;', 'Dr.', 'Owen', 'Fennema,', 'University', 'of', 'Wis-', 'consin;', 'Dr.', 'Robert', 'E.', 'Hardenburg,', 'USDA.', 'Questions', 'and', 'Answers', 'Exhibits', 'Open', 'Capt.', 'Jack', 'Stoney', 'Room', 'TRRF', 'Scientific', 'Advisory', 'Council', 'Meeting', 'Ballroom', 'Foyer']] # noqa: E231
__UpperCamelCase =[[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], 
[576, 788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , A_ )
self.assertListEqual(encoding.boxes , A_ )
# with apply_OCR = False
__UpperCamelCase =LayoutLMvaImageProcessor(apply_ocr=A_ )
__UpperCamelCase =image_processing(A_ , return_tensors='pt' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 224, 224) )
| 682
| 1
|
'''simple docstring'''
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}


def _register_formatter(formatter_cls: type, format_type: Optional[str], aliases: Optional[List[str]] = None):
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})")
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None):
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["python"])
_register_formatter(ArrowFormatter, "arrow", aliases=["pa", "pyarrow"])
_register_formatter(NumpyFormatter, "numpy", aliases=["np"])
_register_formatter(PandasFormatter, "pandas", aliases=["pd"])
_register_formatter(CustomFormatter, "custom")
if config.TORCH_AVAILABLE:
    from .torch_formatter import TorchFormatter

    _register_formatter(TorchFormatter, "torch", aliases=["pt", "pytorch"])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
    _register_unavailable_formatter(_torch_error, "torch", aliases=["pt", "pytorch"])
if config.TF_AVAILABLE:
    from .tf_formatter import TFFormatter

    _register_formatter(TFFormatter, "tensorflow", aliases=["tf"])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
    _register_unavailable_formatter(_tf_error, "tensorflow", aliases=["tf"])
if config.JAX_AVAILABLE:
    from .jax_formatter import JaxFormatter

    _register_formatter(JaxFormatter, "jax", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
    _register_unavailable_formatter(_jax_error, "jax", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(t for t in _FORMAT_TYPES.keys() if t is not None)}, but got '{format_type}'"
        )
| 436
|
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
        # DPR tok
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
'who got the first nobel prize in physics',
'when is the next deadpool movie being released',
'which mode is used for short wave broadcast service',
'who is the owner of reading football club',
'when is the next scandal episode coming out',
'when is the last time the philadelphia won the superbowl',
'what is the most current adobe flash player version',
'how many episodes are there in dragon ball z',
'what is the first step in the evolution of the eye',
'where is gall bladder situated in human body',
'what is the main mineral in lithium batteries',
'who is the president of usa right now',
'where do the greasers live in the outsiders',
'panda is a national animal of which country',
'what is the name of manchester united stadium',
]
        ]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
| 47
| 0
|
'''simple docstring'''
from random import randint, random
def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells
    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cells before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    number_of_cells = len(highway[0])
    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells
        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)
    return highway
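# Minimal usage sketch (illustrative; the parameter values are arbitrary assumptions):
#   highway = construct_highway(number_of_cells=21, frequency=4, initial_speed=0)
#   history = simulate(highway, number_of_update=5, probability=0.3, max_speed=5)
# Each entry of `history` is one time step of the circular road; -1 marks an empty cell.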
if __name__ == "__main__":
import doctest
doctest.testmod()
| 338
|
'''simple docstring'''
def binomial_coefficient(n: int, r: int) -> int:
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute the current row from the previous row
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
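# Worked example: each pass adds one row of Pascal's triangle in place, so after the
# i-th pass c[j] == C(i, j) for j <= min(i, r). For n=10, r=5 the call below prints
# C(10, 5) = 252.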
print(binomial_coefficient(n=10, r=5))
| 338
| 1
|
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
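# The `converters` hook above shows the general pattern for parsing non-standard cell
# values; an illustrative variant for a whitespace-separated float column (not part of
# the original tests):
#   Csv(sep=",", converters={"floats": lambda x: [float(v) for v in x.split()]})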
| 112
|
'''simple docstring'''
import functools
def mincost_tickets(days: list[int], costs: list[int]) -> int:
    # Validation
    if not isinstance(days, list) or not all(isinstance(day, int) for day in days):
        raise ValueError("The parameter days should be a list of integers")
    if len(costs) != 3 or not all(isinstance(cost, int) for cost in costs):
        raise ValueError("The parameter costs should be a list of three integers")
    if len(days) == 0:
        return 0
    if min(days) <= 0:
        raise ValueError("All days elements should be greater than 0")
    if max(days) >= 366:
        raise ValueError("All days elements should be less than 366")
    days_set = set(days)

    @functools.cache
    def dynamic_programming(index: int) -> int:
        if index > 365:
            return 0
        if index not in days_set:
            return dynamic_programming(index + 1)
        return min(
            costs[0] + dynamic_programming(index + 1),
            costs[1] + dynamic_programming(index + 7),
            costs[2] + dynamic_programming(index + 30),
        )

    return dynamic_programming(1)
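# Worked example: days = [1, 4, 6, 7, 8, 20] with costs = [2, 7, 15] (1-day, 7-day and
# 30-day passes) yields 11: a $2 pass on day 1, a $7 pass covering days 4-10, and a $2
# pass on day 20.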
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44
| 0
|
"""simple docstring"""
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class DebugLauncherTester(unittest.TestCase):
    def test_debug_launcher_script(self):
        debug_launcher(test_script.main)

    def test_debug_launcher_ops(self):
        debug_launcher(test_ops.main)
| 721
|
"""simple docstring"""
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock
    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    # NOTE: the loop bound matches the 10-element demo list built in main()
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_ls = temp_rs
    temp_lr = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_ls = temp_rs
        temp_lr = temp_rr
    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )
    # start the processes
    for p in process_array_:
        p.start()
    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr
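# Worked sketch: with the 10-element demo list below, each process holds one value and
# alternates a compare-exchange with its right (even phases) and left (odd phases)
# neighbor; after 10 phases the values read back through `result_pipe` are sorted.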
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 524
| 0
|
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ : Dict = "platform"
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=True, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)
        encoder_outputs = model.encode(inputs_dict["input_ids"])
        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )
        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )
        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids,
        )
        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)
        diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )
        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float64).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float64).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
@slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot_small-90M")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
logger = logging.get_logger(__name__)


class VideoMAEFeatureExtractor(VideoMAEImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use VideoMAEImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}


class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50_267,
        entity_vocab_size=500_000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
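
# A minimal usage sketch (illustrative only, not part of the original module):
# the defaults above mirror the base-sized LUKE checkpoint, so instantiating
# with no arguments gives a base configuration.
#
#     config = LukeConfig()
#     assert config.model_type == "luke"
#     assert config.use_entity_aware_attention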
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
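
# `_import_structure` maps each submodule to the public names it defines; the
# `_LazyModule` installed at the bottom of this file uses it to defer the real
# imports until an attribute is first accessed, keeping package import cheap.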
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
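
# A minimal usage sketch (illustrative only, not part of the original module):
#
#     features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#     task = TextClassification(text_column="text", label_column="labels")
#     task = task.align_with_features(features)  # copies the dataset's ClassLabel into label_schema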
"""simple docstring"""
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the longest side is shorter than the sum of the other sides,
    i.e. the side lengths can form a valid polygon in the Euclidean plane.

    >>> check_polygon([6, 10, 5])
    True
    >>> check_polygon([3, 7, 13, 2])
    False
    """
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # insert at the head in descending order, so the list ends up ascending
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    # simple O(n log n) merge: concatenate the values and rebuild a sorted list
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
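
# A minimal usage sketch (illustrative only, not part of the original module):
# `BaseImageProcessor.__call__` routes to `preprocess`, so a PIL image can be
# converted to model-ready tensors with:
#
#     image_processor = PoolFormerImageProcessor()
#     inputs = image_processor(images=pil_image, return_tensors="pt")  # {"pixel_values": ...}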
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1_024,
    "microsoft/speecht5_tts": 1_024,
    "microsoft/speecht5_vc": 1_024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        # the SentencePiece processor is not picklable, so drop it and reload in __setstate__
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()

        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()

        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)

            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        # save_pretrained must fail while the model is still in BetterTransformer
        # form, and succeed again after it has been reversed
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
from __future__ import annotations
from math import ceil, floor, sqrt
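
# Project Euler style search: a grid of a x b unit squares contains
# T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle
# number. For each candidate a, b is estimated from the quadratic formula and
# the two nearest integers are checked against the target count.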
def solution(target: int = 2_000_000) -> int:
    """Return the area of the grid whose rectangle count is closest to ``target``."""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1, ceil(sqrt(target * 2) * 1.1)):
        triangle_numbers.append(triangle_numbers[-1] + idx)

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:], 1):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a)) / 2
        b_floor = floor(b_estimate)
        b_ceil = ceil(b_estimate)
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor

        if abs(target - triangle_b_second_guess * triangle_a) < abs(target - best_product):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F"""{solution() = }""")
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
logger = logging.get_logger(__name__)


class Conversation:
    """
    Utility class containing a conversation and its history.
    """

    def __init__(
        self, text: str = None, conversation_id: uuid.UUID = None, past_user_inputs=None, generated_responses=None
    ):
        if not conversation_id:
            conversation_id = uuid.uuid4()
        if past_user_inputs is None:
            past_user_inputs = []
        if generated_responses is None:
            generated_responses = []

        self.uuid = conversation_id
        self.past_user_inputs = past_user_inputs
        self.generated_responses = generated_responses
        self.new_user_input = text

    def __eq__(self, other):
        if not isinstance(other, Conversation):
            return False
        if self.uuid == other.uuid:
            return True
        return (
            self.new_user_input == other.new_user_input
            and self.past_user_inputs == other.past_user_inputs
            and self.generated_responses == other.generated_responses
        )

    def add_user_input(self, text: str, overwrite: bool = False):
        if self.new_user_input:
            if overwrite:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten '
                    f'with: "{text}".'
                )
                self.new_user_input = text
            else:
                logger.warning(
                    f'User input added while unprocessed input was existing: "{self.new_user_input}" new input '
                    f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input'
                )
        else:
            self.new_user_input = text

    def mark_processed(self):
        if self.new_user_input:
            self.past_user_inputs.append(self.new_user_input)
        self.new_user_input = None

    def append_response(self, response: str):
        self.generated_responses.append(response)

    def iter_texts(self):
        for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses):
            yield True, user_input
            yield False, generated_response
        if self.new_user_input:
            yield True, self.new_user_input

    def __repr__(self):
        output = f"Conversation id: {self.uuid} \n"
        for is_user, text in self.iter_texts():
            name = "user" if is_user else "bot"
            output += f"{name} >> {text} \n"
        return output
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        min_length_for_response (`int`, *optional*, defaults to 32):
            The minimum length (in number of tokens) for a response.
        minimum_tokens (`int`, *optional*, defaults to 10):
            The minimum length of tokens to leave for a response.
    """,
)
class ConversationalPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def _sanitize_parameters(
        self, min_length_for_response=None, minimum_tokens=None, clean_up_tokenization_spaces=None, **generate_kwargs
    ):
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {}

        if min_length_for_response is not None:
            preprocess_params["min_length_for_response"] = min_length_for_response
        if minimum_tokens is not None:
            forward_params["minimum_tokens"] = minimum_tokens

        if "max_length" in generate_kwargs:
            forward_params["max_length"] = generate_kwargs["max_length"]
            # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if generate_kwargs:
            forward_params.update(generate_kwargs)
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, conversations, num_workers=0, **kwargs):
        outputs = super().__call__(conversations, num_workers=num_workers, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs

    def preprocess(self, conversation, min_length_for_response=32):
        if not isinstance(conversation, Conversation):
            raise ValueError("ConversationalPipeline, expects Conversation as inputs")
        if conversation.new_user_input is None:
            raise ValueError(
                f"Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. "
                "Add user inputs with the conversation's `add_user_input` method"
            )
        if hasattr(self.tokenizer, "_build_conversation_input_ids"):
            input_ids = self.tokenizer._build_conversation_input_ids(conversation)
        else:
            # If the tokenizer cannot handle conversations, we default to only the old version
            input_ids = self._legacy_parse_and_tokenize(conversation)

        if self.framework == "pt":
            input_ids = torch.LongTensor([input_ids])
        elif self.framework == "tf":
            input_ids = tf.constant([input_ids])
        return {"input_ids": input_ids, "conversation": conversation}
    def _forward(self, model_inputs, minimum_tokens=10, **generate_kwargs):
        max_length = generate_kwargs.get("max_length", self.model.config.max_length)

        n = model_inputs["input_ids"].shape[1]
        if max_length - minimum_tokens < n:
            logger.warning(f"Conversation input is too long ({n}), trimming it to ({max_length} - {minimum_tokens})")
            trim = max_length - minimum_tokens
            model_inputs["input_ids"] = model_inputs["input_ids"][:, -trim:]
            if "attention_mask" in model_inputs:
                model_inputs["attention_mask"] = model_inputs["attention_mask"][:, -trim:]
        conversation = model_inputs.pop("conversation")
        generate_kwargs["max_length"] = max_length
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        if self.model.config.is_encoder_decoder:
            start_position = 1
        else:
            start_position = n
        return {"output_ids": output_ids[:, start_position:], "conversation": conversation}

    def postprocess(self, model_outputs, clean_up_tokenization_spaces=True):
        output_ids = model_outputs["output_ids"]
        answer = self.tokenizer.decode(
            output_ids[0],
            skip_special_tokens=True,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
        )
        conversation = model_outputs["conversation"]
        conversation.mark_processed()
        conversation.append_response(answer)
        return conversation

    def _legacy_parse_and_tokenize(self, conversation):
        eos_token_id = self.tokenizer.eos_token_id
        input_ids = []
        for is_user, text in conversation.iter_texts():
            if eos_token_id is not None:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False) + [eos_token_id])
            else:
                input_ids.extend(self.tokenizer.encode(text, add_special_tokens=False))

        if len(input_ids) > self.tokenizer.model_max_length:
            input_ids = input_ids[-self.tokenizer.model_max_length :]
        return input_ids
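
# A minimal usage sketch (illustrative checkpoint, not part of the original module):
#
#     from transformers import pipeline
#     chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")
#     conversation = Conversation("Hi, how are you?")
#     conversation = chatbot(conversation)
#     print(conversation.generated_responses[-1])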
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
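
# Note: the wrapper deliberately returns the elapsed time rather than the
# wrapped function's result; these benchmarks only measure duration.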
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")
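# The pattern above collapses all whitespace before hashing, so files that
# differ only in formatting map to the same content hash during deduplication.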
def get_hash(example):
    """Get hash of content field."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check if current hash is still in set of unique hashes and remove if true."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if file is autogenerated by looking at the first few lines."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if file is a configuration file or a unit test by:
    1. looking for keywords in the first few lines of the file,
    2. counting occurrences of the words 'config' and 'test' relative to the number of lines.
    """
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a python file has none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if file uses the symbol '=' less than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}


def preprocess(example):
    """Chain all preprocessing steps into one function to not fill cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
def filter(example, uniques, args):
    """Filter dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with g-zip."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("3.8"):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""

if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )
_CITATION = "\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nCharacter error rate (CER) is a common metric of the performance of an automatic speech recognition system.\n\nCER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.\n\nCharacter error rate can be computed as:\n\nCER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct characters,\nN is the number of characters in the reference (N=S+D+C).\n\nCER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the\nperformance of the ASR system with a CER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nComputes CER score of transcribed segments against references.\nArgs:\n references: list of references for each speech input.\n predictions: list of transcriptions to score.\n concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.\nReturns:\n (float): the character error rate\n\nExamples:\n\n >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n >>> references = [\"this is the reference\", \"there is another one\"]\n >>> cer = datasets.load_metric(\"cer\")\n >>> cer_score = cer.compute(predictions=predictions, references=references)\n >>> print(cer_score)\n 0.34146341463414637\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
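
# `enable_full_determinism` seeds the RNGs and forces deterministic kernels so
# the pixel-slice assertions in the tests below are reproducible across runs.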
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
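
# Each [parlai_name, hf_name] pair rewrites a ParlAI weight-name fragment to
# its Hugging Face equivalent; `rename_state_dict_key` applies them in order.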
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
UpperCAmelCase__ = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
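# Illustrative sketch (not part of the original script): a dry run of
# rename_state_dict_key on two made-up ParlAI-style keys; the expected HF names
# follow from PATTERNS plus the encoder/decoder rules above.
assert rename_state_dict_key("encoder.layers.0.attention.q_lin.weight") == "encoder.layers.0.self_attn.q_proj.weight"
assert rename_state_dict_key("decoder.layers.1.norm3.bias") == "decoder.layers.1.final_layer_norm.bias"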
"""simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weights
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    # reformer model
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the trax model pickle file."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
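# Illustrative sketch (not part of the original script): what set_param does,
# shown on a toy nn.Linear -- it swaps the layer's parameters in place after a
# shape check.
_toy_layer = nn.Linear(4, 3)
_new_weight = torch.zeros(3, 4)
_new_bias = torch.ones(3)
set_param(_toy_layer, _new_weight, _new_bias)
assert torch.equal(_toy_layer.weight.data, _new_weight)
assert torch.equal(_toy_layer.bias.data, _new_bias)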
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # choose the least costly cell so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action


if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y, x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])

    for i in range(len(path)):
        print(path[i])
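    # Illustrative sanity checks (not part of the original demo): the heuristic
    # is plain Manhattan distance to the goal, with a 99 penalty stamped onto
    # obstacle cells so the search steers around them.
    assert heuristic[goal[0]][goal[1]] == 0
    assert heuristic[0][0] == abs(0 - goal[0]) + abs(0 - goal[1])  # free cell
    assert heuristic[0][1] == 99  # obstacle cell, since grid[0][1] == 1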
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(jitted_outputs), len(outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
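# Illustrative sketch (not part of the original tests): the patch/sequence
# bookkeeping used by the tester, spelled out with its default values -- a
# 30x30 image cut into 2x2 patches yields 225 patches, and the [CLS] token
# brings the sequence length to 226.
_image_size, _patch_size = 30, 2
_num_patches = (_image_size // _patch_size) ** 2
assert _num_patches == 225
assert _num_patches + 1 == 226  # +1 for the [CLS] token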
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 8,
        max_size=32 * 8,
        num_labels=4,
        hidden_dim=64,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()
        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels
    def get_config(self):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim,
        )

        config.num_queries = self.num_queries
        config.num_labels = self.num_labels

        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels

        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> Optional[int]:
_lowerCamelCase : str = output.encoder_hidden_states
_lowerCamelCase : int = output.pixel_decoder_hidden_states
_lowerCamelCase : Optional[int] = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths))
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , len(config.backbone_config.depths))
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE) , config.decoder_layers)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False) -> List[str]:
with torch.no_grad():
_lowerCamelCase : Optional[int] = MaskaFormerModel(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
_lowerCamelCase : Optional[int] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[str] = model(SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE) -> str:
_lowerCamelCase : str = MaskaFormerForUniversalSegmentation(config=SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.eval()
def comm_check_on_output(SCREAMING_SNAKE_CASE):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
_lowerCamelCase : List[Any] = model(pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE)
comm_check_on_output(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Optional[Any] = model(
pixel_values=SCREAMING_SNAKE_CASE , pixel_mask=SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE)
comm_check_on_output(SCREAMING_SNAKE_CASE)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class lowercase__ ( A_ ,A_ ,unittest.TestCase ):
__UpperCAmelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
__UpperCAmelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def UpperCamelCase_ ( self) -> Dict:
_lowerCamelCase : Optional[int] = MaskaFormerModelTester(self)
_lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> List[str]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase , _lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*SCREAMING_SNAKE_CASE)
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""")
def UpperCamelCase_ ( self) -> Optional[int]:
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""")
def UpperCamelCase_ ( self) -> Tuple:
pass
@unittest.skip(reason="""Mask2Former is not a generative model""")
def UpperCamelCase_ ( self) -> List[Any]:
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""")
def UpperCamelCase_ ( self) -> Any:
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""")
def UpperCamelCase_ ( self) -> Dict:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""")
def UpperCamelCase_ ( self) -> Optional[int]:
pass
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : Dict = model_class(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase : str = [*signature.parameters.keys()]
_lowerCamelCase : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE)
@slow
def UpperCamelCase_ ( self) -> Optional[int]:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
_lowerCamelCase : Optional[int] = MaskaFormerModel.from_pretrained(SCREAMING_SNAKE_CASE)
self.assertIsNotNone(SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Dict = (self.model_tester.min_size,) * 2
_lowerCamelCase : str = {
"""pixel_values""": torch.randn((2, 3, *size) , device=SCREAMING_SNAKE_CASE),
"""mask_labels""": torch.randn((2, 10, *size) , device=SCREAMING_SNAKE_CASE),
"""class_labels""": torch.zeros(2 , 10 , device=SCREAMING_SNAKE_CASE).long(),
}
_lowerCamelCase : List[str] = self.model_tester.get_config()
_lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.loss is not None)
def UpperCamelCase_ ( self) -> Tuple:
_lowerCamelCase , _lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , output_hidden_states=SCREAMING_SNAKE_CASE)
def UpperCamelCase_ ( self) -> Optional[int]:
_lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE , output_attentions=SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.attentions is not None)
def UpperCamelCase_ ( self) -> Optional[Any]:
if not self.model_tester.is_training:
return
_lowerCamelCase : Any = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : List[Any] = model_class(SCREAMING_SNAKE_CASE)
model.to(SCREAMING_SNAKE_CASE)
model.train()
_lowerCamelCase : int = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE).loss
loss.backward()
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Any = self.all_model_classes[1]
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
_lowerCamelCase : int = True
_lowerCamelCase : Optional[Any] = True
_lowerCamelCase : str = model_class(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
model.train()
_lowerCamelCase : List[Any] = model(SCREAMING_SNAKE_CASE , mask_labels=SCREAMING_SNAKE_CASE , class_labels=SCREAMING_SNAKE_CASE)
_lowerCamelCase : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
_lowerCamelCase : int = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
_lowerCamelCase : str = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
_lowerCamelCase : Optional[int] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=SCREAMING_SNAKE_CASE)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
UpperCAmelCase = 1e-4
def _snake_case ( ):
"""simple docstring"""
_lowerCamelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@slow
class lowercase__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def UpperCamelCase_ ( self) -> Union[str, Any]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints) if is_vision_available() else None
def UpperCamelCase_ ( self) -> Optional[Any]:
_lowerCamelCase : Tuple = MaskaFormerModel.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : str = self.default_image_processor
_lowerCamelCase : List[str] = prepare_img()
_lowerCamelCase : Union[str, Any] = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384))
with torch.no_grad():
_lowerCamelCase : Dict = model(**SCREAMING_SNAKE_CASE)
_lowerCamelCase : List[Any] = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
_lowerCamelCase : Any = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
_lowerCamelCase : Dict = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self) -> Any:
_lowerCamelCase : Optional[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval()
_lowerCamelCase : Optional[Any] = self.default_image_processor
_lowerCamelCase : Any = prepare_img()
_lowerCamelCase : Dict = image_processor(SCREAMING_SNAKE_CASE , return_tensors="""pt""").to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Union[str, Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
# check size
self.assertEqual(SCREAMING_SNAKE_CASE , (1, 3, 384, 384))
with torch.no_grad():
_lowerCamelCase : List[str] = model(**SCREAMING_SNAKE_CASE)
# masks_queries_logits
_lowerCamelCase : str = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4))
_lowerCamelCase : Any = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
_lowerCamelCase : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE).to(SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
# class_queries_logits
_lowerCamelCase : List[str] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1))
_lowerCamelCase : Optional[Any] = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
]).to(SCREAMING_SNAKE_CASE)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE))
def UpperCamelCase_ ( self) -> int:
_lowerCamelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints).to(SCREAMING_SNAKE_CASE).eval()
_lowerCamelCase : str = self.default_image_processor
_lowerCamelCase : Tuple = image_processor(
[np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))] , segmentation_maps=[np.zeros((384, 384)).astype(np.floataa), np.zeros((384, 384)).astype(np.floataa)] , return_tensors="""pt""" , )
_lowerCamelCase : Optional[Any] = inputs["""pixel_values"""].to(SCREAMING_SNAKE_CASE)
_lowerCamelCase : Any = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""mask_labels"""]]
_lowerCamelCase : Union[str, Any] = [el.to(SCREAMING_SNAKE_CASE) for el in inputs["""class_labels"""]]
with torch.no_grad():
_lowerCamelCase : Any = model(**SCREAMING_SNAKE_CASE)
self.assertTrue(outputs.loss is not None)
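# Illustrative sketch (not part of the original tests): the integration tests
# above expect mask logits at a //4 spatial size relative to the padded input,
# so a 384x384 image yields 96x96 mask logits. A toy shape check (100 queries
# is a stand-in value):
_mask_logits = torch.zeros(1, 100, 384 // 4, 384 // 4)
assert _mask_logits.shape == (1, 100, 96, 96)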
INSTALL_CONTENT = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
def decimal_isolate(number: float, digit_amount: int) -> float:
    """
    Isolate the decimal (fractional) part of `number`. If `digit_amount` > 0,
    round the decimal part to that many digits.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
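    # Illustrative checks (not part of the original demo): the fractional part
    # keeps the sign of the input, and digit_amount only controls rounding.
    assert decimal_isolate(-14.789, 3) == -0.789
    assert abs(decimal_isolate(1.53, 0) - 0.53) < 1e-9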
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_rescale=True,
        rescale_factor=1 / 255,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width when feeding images to the image
        processor, given the shortest-edge resizing rule.
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class snake_case ( __snake_case ,unittest.TestCase ):
'''simple docstring'''
snake_case_ : Any = DetrImageProcessor if is_vision_available() else None
def UpperCamelCase_ ( self : str) -> Tuple:
"""simple docstring"""
_snake_case : int = DetrImageProcessingTester(self)
@property
def UpperCamelCase_ ( self : Any) -> Union[str, Any]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase_ ( self : Optional[Any]) -> List[str]:
"""simple docstring"""
_snake_case : Dict = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(lowerCAmelCase , """image_mean"""))
self.assertTrue(hasattr(lowerCAmelCase , """image_std"""))
self.assertTrue(hasattr(lowerCAmelCase , """do_normalize"""))
self.assertTrue(hasattr(lowerCAmelCase , """do_rescale"""))
self.assertTrue(hasattr(lowerCAmelCase , """rescale_factor"""))
self.assertTrue(hasattr(lowerCAmelCase , """do_resize"""))
self.assertTrue(hasattr(lowerCAmelCase , """size"""))
self.assertTrue(hasattr(lowerCAmelCase , """do_pad"""))
def UpperCamelCase_ ( self : Optional[Any]) -> Any:
"""simple docstring"""
_snake_case : Dict = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333})
self.assertEqual(image_processor.do_pad , lowerCAmelCase)
_snake_case : Optional[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase)
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84})
self.assertEqual(image_processor.do_pad , lowerCAmelCase)
def UpperCamelCase_ ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
pass
def UpperCamelCase_ ( self : List[str]) -> str:
"""simple docstring"""
_snake_case : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
_snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image)
# Test not batched input
_snake_case : Optional[int] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
_snake_case , _snake_case : Union[str, Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case , _snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase)
_snake_case : Optional[Any] = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : str) -> List[Any]:
"""simple docstring"""
_snake_case : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
_snake_case : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray)
# Test not batched input
_snake_case : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
_snake_case , _snake_case : Tuple = self.image_processor_tester.get_expected_values(lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case : List[Any] = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
_snake_case , _snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def UpperCamelCase_ ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
_snake_case : Tuple = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
_snake_case : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase)
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor)
# Test not batched input
_snake_case : str = image_processing(image_inputs[0] , return_tensors="""pt""").pixel_values
_snake_case , _snake_case : int = self.image_processor_tester.get_expected_values(lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_snake_case : List[Any] = image_processing(lowerCAmelCase , return_tensors="""pt""").pixel_values
_snake_case , _snake_case : Optional[Any] = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def UpperCamelCase_ ( self : Dict) -> Optional[int]:
"""simple docstring"""
_snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""") as f:
_snake_case : Any = json.loads(f.read())
_snake_case : Tuple = {"""image_id""": 3_9769, """annotations""": target}
# encode them
_snake_case : Dict = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""")
_snake_case : List[str] = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , return_tensors="""pt""")
# verify pixel values
_snake_case : Optional[int] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase)
_snake_case : List[str] = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase , atol=1E-4))
# verify area
_snake_case : Dict = torch.tensor([5_887.9_600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase))
# verify boxes
_snake_case : Optional[Any] = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase)
_snake_case : List[Any] = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase , atol=1E-3))
# verify image_id
_snake_case : Optional[Any] = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase))
# verify is_crowd
_snake_case : str = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase))
# verify class_labels
_snake_case : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase))
# verify orig_size
_snake_case : int = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase))
# verify size
_snake_case : int = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase))
@slow
def UpperCamelCase_ ( self : Optional[int]) -> str:
"""simple docstring"""
_snake_case : Any = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""") as f:
_snake_case : Optional[int] = json.loads(f.read())
_snake_case : Dict = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
_snake_case : str = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""")
# encode them
_snake_case : int = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""")
_snake_case : List[Any] = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , masks_path=lowerCAmelCase , return_tensors="""pt""")
# verify pixel values
_snake_case : Dict = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding["""pixel_values"""].shape , lowerCAmelCase)
_snake_case : Optional[int] = torch.tensor([0.2_796, 0.3_138, 0.3_481])
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , lowerCAmelCase , atol=1E-4))
# verify area
_snake_case : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5_879.6_562, 7_634.1_147])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , lowerCAmelCase))
# verify boxes
_snake_case : List[str] = torch.Size([6, 4])
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , lowerCAmelCase)
_snake_case : Union[str, Any] = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , lowerCAmelCase , atol=1E-3))
# verify image_id
_snake_case : int = torch.tensor([3_9769])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , lowerCAmelCase))
# verify is_crowd
_snake_case : Any = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , lowerCAmelCase))
# verify class_labels
_snake_case : List[str] = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , lowerCAmelCase))
# verify masks
_snake_case : Optional[int] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , lowerCAmelCase)
# verify orig_size
_snake_case : Any = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , lowerCAmelCase))
# verify size
_snake_case : List[str] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , lowerCAmelCase))
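# Illustrative sketch (not part of the original tests): the shortest-edge
# resize rule mirrored by get_expected_values above -- the short side is scaled
# to size["shortest_edge"] (18 here) and the long side follows proportionally.
_shortest_edge = 18
_h, _w = 30, 60  # a landscape image: w > h
_expected_h, _expected_w = _shortest_edge, int(_shortest_edge * _w / _h)
assert (_expected_h, _expected_w) == (18, 36)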
'''simple docstring'''
class Node:
    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val


class MinHeap:
    """
    Min-heap of Node objects, keyed on each node's val, with an index map so
    that decrease_key works in O(log n).
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)

        for idx, i in enumerate(array):
            self.idx_of_element[i] = idx
            self.heap_dict[i.name] = i.val

        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        while True:
            l = self.get_left_child_idx(idx)  # noqa: E741
            r = self.get_right_child_idx(idx)

            smallest = idx
            if l < len(array) and array[l] < array[idx]:
                smallest = l
            if r < len(array) and array[r] < array[smallest]:
                smallest = r

            if smallest != idx:
                array[idx], array[smallest] = array[smallest], array[idx]
                (
                    self.idx_of_element[array[idx]],
                    self.idx_of_element[array[smallest]],
                ) = (
                    self.idx_of_element[array[smallest]],
                    self.idx_of_element[array[idx]],
                )
                idx = smallest
            else:
                break

    def sift_up(self, idx):
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        return self.heap[0]

    def remove(self):
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )

        x = self.heap.pop()
        del self.idx_of_element[x]
        self.sift_down(0, self.heap)
        return x

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "new_value must be less than the current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])


# Example nodes
r = Node("R", -1)
b = Node("B", 6)
a = Node("A", 3)
x = Node("X", 1)
e = Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])
# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)
# Before
print('Min Heap - before decrease key')
for i in my_min_heap.heap:
print(i)
print('Min Heap - After decrease key of node [B -> -17]')
my_min_heap.decrease_key(b, -17)
# After
for i in my_min_heap.heap:
print(i)
if __name__ == "__main__":
import doctest
doctest.testmod()
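    # Illustrative usage (not part of the original demo): repeatedly removing
    # the minimum empties the heap in ascending val order, i.e. a heapsort.
    extraction_order = []
    while not my_min_heap.is_empty():
        extraction_order.append(my_min_heap.remove().name)
    print("Extraction order:", extraction_order)  # B comes first (val -17)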
'''simple docstring'''
# Alphabet size used by the polynomial hash
alphabet_size = 256
# Modulus to hash a string
modulus = 1_000_003


def rabin_karp(pattern: str, text: str) -> bool:
    """
    The Rabin-Karp algorithm: returns True if `pattern` occurs in `text`,
    using a rolling polynomial hash so each window is compared in O(1).
    """
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Update the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False


def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
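# Illustrative sketch (not part of the original module): the rolling-hash
# update drops the leading character's contribution (its ordinal times
# alphabet_size**(p_len - 1), tracked as modulus_power) and appends the next
# character. A direct recomputation confirms one step of the recurrence:
def _naive_hash(s: str) -> int:
    h = 0
    for ch in s:
        h = (ord(ch) + h * alphabet_size) % modulus
    return h


_text = "abcdef"
_h0 = _naive_hash(_text[0:3])
_power = pow(alphabet_size, 2, modulus)  # alphabet_size**(p_len - 1) for p_len = 3
_h1 = ((_h0 - ord(_text[0]) * _power) * alphabet_size + ord(_text[3])) % modulus
assert _h1 == _naive_hash(_text[1:4])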
'''simple docstring'''
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
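# Illustrative usage (not part of the original script; the script filename and
# paths below are hypothetical placeholders -- all three flags are required by
# the argparse definition above):
#
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path /path/to/rembert/checkpoint \
#       --rembert_config_file /path/to/rembert_config.json \
#       --pytorch_dump_path /path/to/pytorch_model.bin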
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
A_ : Dict = logging.get_logger(__name__)
A_ : Optional[Any] = {
"facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowerCamelCase__ = '''convnextv2'''
def __init__( self , __SCREAMING_SNAKE_CASE=3 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="gelu" , __SCREAMING_SNAKE_CASE=0.02 , __SCREAMING_SNAKE_CASE=1e-1_2 , __SCREAMING_SNAKE_CASE=0.0 , __SCREAMING_SNAKE_CASE=2_2_4 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ):
super().__init__(**__SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = patch_size
snake_case__ : List[Any] = num_stages
snake_case__ : Optional[Any] = [9_6, 1_9_2, 3_8_4, 7_6_8] if hidden_sizes is None else hidden_sizes
snake_case__ : Dict = [3, 3, 9, 3] if depths is None else depths
snake_case__ : List[str] = hidden_act
snake_case__ : str = initializer_range
snake_case__ : Tuple = layer_norm_eps
snake_case__ : int = drop_path_rate
snake_case__ : Dict = image_size
snake_case__ : Optional[int] = ["""stem"""] + [f"stage{idx}" for idx in range(1 , len(self.depths ) + 1 )]
snake_case__ , snake_case__ : Optional[int] = get_aligned_output_features_output_indices(
out_features=__SCREAMING_SNAKE_CASE , out_indices=__SCREAMING_SNAKE_CASE , stage_names=self.stage_names )
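# Note: with the default depths [3, 3, 9, 3] above, `stage_names` resolves to
# ["stem", "stage1", "stage2", "stage3", "stage4"], and the mixin aligns
# `out_features`/`out_indices` against exactly those names.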
| 38
|
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 638
| 0
|
'''simple docstring'''
import math
def is_prime(number: int) -> bool:
    """Return True if `number` is prime, using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, even numbers, and multiples of 3 are not prime
        return False
    # All primes greater than 3 are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """Project Euler 58: return the side length of the number spiral at which
    the proportion of primes along both diagonals first falls below `ratio`."""
    primes = 3
    j = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
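    # Illustrative sanity checks for the 6k +/- 1 primality test above:
    assert is_prime(2) and is_prime(3) and is_prime(97)
    assert not is_prime(1) and not is_prime(25) and not is_prime(91)  # 91 = 7 * 13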
| 201
|
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class lowerCAmelCase_ ( snake_case__ ):
"""simple docstring"""
a_ :Dict =["""image_processor""", """feature_extractor"""]
a_ :str ="""TvltImageProcessor"""
a_ :str ="""TvltFeatureExtractor"""
def __init__( self : Dict , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ):
'''simple docstring'''
super().__init__(image_processor=SCREAMING_SNAKE_CASE__ , feature_extractor=SCREAMING_SNAKE_CASE__ )
__a = image_processor
__a = feature_extractor
def __call__( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Optional[Any]=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Any=False , SCREAMING_SNAKE_CASE__ : List[str]=False , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Optional[int] , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError("""You need to specify either an `images` or `audio` input to process.""" )
__a = None
if images is not None:
__a = self.image_processor(SCREAMING_SNAKE_CASE__ , mask_pixel=SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if images_mixed is not None:
__a = self.image_processor(SCREAMING_SNAKE_CASE__ , is_mixed=SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
if audio is not None:
__a = self.feature_extractor(
SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ , sampling_rate=SCREAMING_SNAKE_CASE__ , mask_audio=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
__a = {}
if audio is not None:
output_dict.update(SCREAMING_SNAKE_CASE__ )
if images is not None:
output_dict.update(SCREAMING_SNAKE_CASE__ )
if images_mixed_dict is not None:
output_dict.update(SCREAMING_SNAKE_CASE__ )
return output_dict
@property
def __a ( self : List[str] ):
'''simple docstring'''
__a = self.image_processor.model_input_names
__a = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) )
| 201
| 1
|
class Graph:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list) -> None:
        # Checking all 8 elements surrounding nth element (DFS flood fill)
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make this cell visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if not visited[i][j] and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count
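# Quick demonstration of the flood-fill island counter above (grid and
# expected count are illustrative, not from the original module):
if __name__ == "__main__":
    demo_grid = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(Graph(3, 4, demo_grid).count_islands())  # -> 2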
| 64
|
import inspect
import unittest
class _lowerCamelCase ( unittest.TestCase ):
def UpperCamelCase_ ( self ) -> Any:
try:
import diffusers # noqa: F401
except ImportError:
assert False
def UpperCamelCase_ ( self ) -> List[str]:
import diffusers
from diffusers.dependency_versions_table import deps
SCREAMING_SNAKE_CASE__: Tuple= inspect.getmembers(lowerCAmelCase , inspect.isclass )
for cls_name, cls_module in all_classes:
if "dummy_" in cls_module.__module__:
for backend in cls_module._backends:
if backend == "k_diffusion":
SCREAMING_SNAKE_CASE__: Optional[int]= '''k-diffusion'''
elif backend == "invisible_watermark":
SCREAMING_SNAKE_CASE__: int= '''invisible-watermark'''
assert backend in deps, f'{backend} is not in the deps table!'
| 64
| 1
|
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCamelCase ( _A , unittest.TestCase ):
snake_case_ = FlaxAutoencoderKL
@property
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[Any] = 4
lowerCAmelCase : int = 3
lowerCAmelCase : Union[str, Any] = (32, 32)
lowerCAmelCase : Any = jax.random.PRNGKey(0 )
lowerCAmelCase : Any = jax.random.uniform(a_ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
lowerCAmelCase : int = self.dummy_input
return init_dict, inputs_dict
| 551
|
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCamelCase :
def __init__( self , a_ , a_=2 , a_=True , a_=False , a_=10 , a_=3 , a_=32 * 8 , a_=32 * 8 , a_=4 , a_=64 , ):
lowerCAmelCase : Optional[int] = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : str = is_training
lowerCAmelCase : Any = use_auxiliary_loss
lowerCAmelCase : Dict = num_queries
lowerCAmelCase : Dict = num_channels
lowerCAmelCase : str = min_size
lowerCAmelCase : Any = max_size
lowerCAmelCase : int = num_labels
lowerCAmelCase : Tuple = hidden_dim
lowerCAmelCase : Optional[Any] = hidden_dim
def _lowerCamelCase ( self ):
lowerCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
a_ )
lowerCAmelCase : Optional[int] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=a_ )
lowerCAmelCase : str = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=a_ ) > 0.5
).float()
lowerCAmelCase : Union[str, Any] = (torch.rand((self.batch_size, self.num_labels) , device=a_ ) > 0.5).long()
lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[Any] = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
lowerCAmelCase : Optional[Any] = self.num_queries
lowerCAmelCase : Optional[Any] = self.num_labels
lowerCAmelCase : str = [1, 1, 1, 1]
lowerCAmelCase : Tuple = self.num_channels
lowerCAmelCase : str = 64
lowerCAmelCase : Any = 128
lowerCAmelCase : List[str] = self.hidden_dim
lowerCAmelCase : Optional[Any] = self.hidden_dim
lowerCAmelCase : int = self.hidden_dim
return config
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : int = self.prepare_config_and_inputs()
lowerCAmelCase : Optional[int] = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def _lowerCamelCase ( self , a_ , a_ ):
lowerCAmelCase : str = output.encoder_hidden_states
lowerCAmelCase : List[str] = output.pixel_decoder_hidden_states
lowerCAmelCase : Any = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a_ ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(a_ ) , config.decoder_layers )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_=False ):
with torch.no_grad():
lowerCAmelCase : Tuple = MaskaFormerModel(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase : Tuple = model(pixel_values=a_ , pixel_mask=a_ )
lowerCAmelCase : str = model(a_ , output_hidden_states=a_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
        # let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(a_ , a_ )
def _lowerCamelCase ( self , a_ , a_ , a_ , a_ , a_ ):
lowerCAmelCase : Union[str, Any] = MaskaFormerForUniversalSegmentation(config=a_ )
model.to(a_ )
model.eval()
def comm_check_on_output(a_ ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
lowerCAmelCase : List[str] = model(pixel_values=a_ , pixel_mask=a_ )
lowerCAmelCase : Union[str, Any] = model(a_ )
comm_check_on_output(a_ )
lowerCAmelCase : str = model(
pixel_values=a_ , pixel_mask=a_ , mask_labels=a_ , class_labels=a_ )
comm_check_on_output(a_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowerCamelCase ( _A , _A , unittest.TestCase ):
snake_case_ = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
snake_case_ = {"feature-extraction": MaskaFormerModel} if is_torch_available() else {}
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = MaskaFormerModelTester(self )
lowerCAmelCase : Tuple = ConfigTester(self , config_class=a_ , has_text_modality=a_ )
def _lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(a_ , **a_ , output_hidden_states=a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*a_ )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def _lowerCamelCase ( self ):
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def _lowerCamelCase ( self ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def _lowerCamelCase ( self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowerCamelCase ( self ):
pass
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Dict = model_class(a_ )
lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase : List[str] = [*signature.parameters.keys()]
lowerCAmelCase : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , a_ )
@slow
def _lowerCamelCase ( self ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
lowerCAmelCase : int = MaskaFormerModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase : List[Any] = (self.model_tester.min_size,) * 2
lowerCAmelCase : Union[str, Any] = {
"pixel_values": torch.randn((2, 3, *size) , device=a_ ),
"mask_labels": torch.randn((2, 10, *size) , device=a_ ),
"class_labels": torch.zeros(2 , 10 , device=a_ ).long(),
}
lowerCAmelCase : Optional[Any] = self.model_tester.get_config()
lowerCAmelCase : List[Any] = MaskaFormerForUniversalSegmentation(a_ ).to(a_ )
lowerCAmelCase : Dict = model(**a_ )
self.assertTrue(outputs.loss is not None )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(a_ , **a_ , output_hidden_states=a_ )
def _lowerCamelCase ( self ):
lowerCAmelCase , lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase : Optional[int] = model_class(a_ ).to(a_ )
lowerCAmelCase : Union[str, Any] = model(**a_ , output_attentions=a_ )
self.assertTrue(outputs.attentions is not None )
def _lowerCamelCase ( self ):
if not self.model_tester.is_training:
return
lowerCAmelCase : List[Any] = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : List[Any] = model_class(a_ )
model.to(a_ )
model.train()
lowerCAmelCase : Union[str, Any] = model(a_ , mask_labels=a_ , class_labels=a_ ).loss
loss.backward()
def _lowerCamelCase ( self ):
lowerCAmelCase : Optional[int] = self.all_model_classes[1]
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
lowerCAmelCase : Tuple = True
lowerCAmelCase : int = True
lowerCAmelCase : int = model_class(a_ ).to(a_ )
model.train()
lowerCAmelCase : Any = model(a_ , mask_labels=a_ , class_labels=a_ )
lowerCAmelCase : List[Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
lowerCAmelCase : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
lowerCAmelCase : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
lowerCAmelCase : Tuple = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=a_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCAmelCase = 1e-4
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self ):
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _lowerCamelCase ( self ):
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _lowerCamelCase ( self ):
lowerCAmelCase : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(a_ )
lowerCAmelCase : Optional[int] = self.default_image_processor
lowerCAmelCase : Dict = prepare_img()
lowerCAmelCase : Any = image_processor(a_ , return_tensors="pt" ).to(a_ )
lowerCAmelCase : str = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a_ , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase : Dict = model(**a_ )
lowerCAmelCase : Union[str, Any] = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) )
lowerCAmelCase : Optional[Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , a_ , atol=a_ ) )
lowerCAmelCase : Optional[int] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(a_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , a_ , atol=a_ ) )
def _lowerCamelCase ( self ):
lowerCAmelCase : Any = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a_ ).eval()
lowerCAmelCase : Union[str, Any] = self.default_image_processor
lowerCAmelCase : Dict = prepare_img()
lowerCAmelCase : Tuple = image_processor(a_ , return_tensors="pt" ).to(a_ )
lowerCAmelCase : Any = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(a_ , (1, 3, 384, 384) )
with torch.no_grad():
lowerCAmelCase : List[str] = model(**a_ )
# masks_queries_logits
lowerCAmelCase : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
lowerCAmelCase : Tuple = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
lowerCAmelCase : Tuple = torch.tensor(a_ ).to(a_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , a_ , atol=a_ ) )
# class_queries_logits
lowerCAmelCase : str = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
lowerCAmelCase : List[str] = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(a_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , a_ , atol=a_ ) )
def _lowerCamelCase ( self ):
lowerCAmelCase : Tuple = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(a_ ).eval()
lowerCAmelCase : Optional[Any] = self.default_image_processor
lowerCAmelCase : Optional[int] = image_processor(
[np.zeros((3, 800, 1_333) ), np.zeros((3, 800, 1_333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
lowerCAmelCase : Optional[int] = inputs["pixel_values"].to(a_ )
lowerCAmelCase : int = [el.to(a_ ) for el in inputs["mask_labels"]]
lowerCAmelCase : Union[str, Any] = [el.to(a_ ) for el in inputs["class_labels"]]
with torch.no_grad():
lowerCAmelCase : Optional[Any] = model(**a_ )
self.assertTrue(outputs.loss is not None )
| 551
| 1
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature(word: str) -> str:
    """Return a canonical signature: the word's letters in sorted order."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word sharing the given word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
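# Example: signature("listen") == signature("silent") == "eilnst", so if both
# words appear in words.txt they share a word_by_signature bucket and report
# each other as anagrams.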
| 30
|
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : str = {
"snap-research/efficientformer-l1-300": (
"https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
),
}
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = "efficientformer"
def __init__( self , snake_case = [3, 2, 6, 4] , snake_case = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case = [True, True, True, True] , snake_case = 4_4_8 , snake_case = 3_2 , snake_case = 4 , snake_case = 7 , snake_case = 5 , snake_case = 8 , snake_case = 4 , snake_case = 0.0 , snake_case = 1_6 , snake_case = 3 , snake_case = 3 , snake_case = 3 , snake_case = 2 , snake_case = 1 , snake_case = 0.0 , snake_case = 1 , snake_case = True , snake_case = True , snake_case = 1e-5 , snake_case = "gelu" , snake_case = 0.02 , snake_case = 1e-12 , snake_case = 2_2_4 , snake_case = 1e-05 , **snake_case , ):
'''simple docstring'''
super().__init__(**snake_case )
UpperCAmelCase : Any = hidden_act
UpperCAmelCase : Optional[Any] = hidden_dropout_prob
UpperCAmelCase : List[Any] = hidden_sizes
UpperCAmelCase : str = num_hidden_layers
UpperCAmelCase : int = num_attention_heads
UpperCAmelCase : List[Any] = initializer_range
UpperCAmelCase : str = layer_norm_eps
UpperCAmelCase : int = patch_size
UpperCAmelCase : Optional[int] = num_channels
UpperCAmelCase : Any = depths
UpperCAmelCase : Dict = mlp_expansion_ratio
UpperCAmelCase : List[str] = downsamples
UpperCAmelCase : List[Any] = dim
UpperCAmelCase : Any = key_dim
UpperCAmelCase : List[str] = attention_ratio
UpperCAmelCase : Union[str, Any] = resolution
UpperCAmelCase : List[str] = pool_size
UpperCAmelCase : Dict = downsample_patch_size
UpperCAmelCase : Optional[int] = downsample_stride
UpperCAmelCase : Any = downsample_pad
UpperCAmelCase : int = drop_path_rate
UpperCAmelCase : Optional[Any] = num_metaad_blocks
UpperCAmelCase : List[str] = distillation
UpperCAmelCase : int = use_layer_scale
UpperCAmelCase : List[str] = layer_scale_init_value
UpperCAmelCase : Union[str, Any] = image_size
UpperCAmelCase : Any = batch_norm_eps
| 679
| 0
|
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __magic_name__ ( lowercase_ ,unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase = VideoToVideoSDPipeline
_UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"video"} ) - {"image", "width", "height"}
_UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"video"} ) - {"image"}
_UpperCamelCase = PipelineTesterMixin.required_optional_params - {"latents"}
_UpperCamelCase = False
# No `output_type`.
_UpperCamelCase = frozenset(
[
"num_inference_steps",
"generator",
"latents",
"return_dict",
"callback",
"callback_steps",
] )
def _UpperCAmelCase ( self ):
torch.manual_seed(0 )
_lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''CrossAttnDownBlock3D''', '''DownBlock3D''') , up_block_types=('''UpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''', '''CrossAttnUpBlock3D''') , cross_attention_dim=32 , attention_head_dim=4 , )
_lowerCamelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=a__ , set_alpha_to_one=a__ , )
torch.manual_seed(0 )
_lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
_lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
_lowerCamelCase = CLIPTextModel(a__ )
_lowerCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
_lowerCamelCase = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def _UpperCAmelCase ( self , a__ , a__=0 ):
# 3 frames
_lowerCamelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(a__ ) ).to(a__ )
if str(a__ ).startswith('''mps''' ):
_lowerCamelCase = torch.manual_seed(a__ )
else:
_lowerCamelCase = torch.Generator(device=a__ ).manual_seed(a__ )
_lowerCamelCase = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''video''': video,
'''generator''': generator,
'''num_inference_steps''': 2,
'''guidance_scale''': 6.0,
'''output_type''': '''pt''',
}
return inputs
def _UpperCAmelCase ( self ):
_lowerCamelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
_lowerCamelCase = self.get_dummy_components()
_lowerCamelCase = VideoToVideoSDPipeline(**a__ )
_lowerCamelCase = sd_pipe.to(a__ )
sd_pipe.set_progress_bar_config(disable=a__ )
_lowerCamelCase = self.get_dummy_inputs(a__ )
_lowerCamelCase = '''np'''
_lowerCamelCase = sd_pipe(**a__ ).frames
_lowerCamelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_lowerCamelCase = np.array([1_06, 1_17, 1_13, 1_74, 1_37, 1_12, 1_48, 1_51, 1_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def _UpperCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=a__ , expected_max_diff=5E-3 )
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''Batching needs to be properly figured out first for this pipeline.''' )
def _UpperCAmelCase ( self ):
pass
@unittest.skip(reason='''`num_images_per_prompt` argument is not supported for this pipeline.''' )
def _UpperCAmelCase ( self ):
pass
def _UpperCAmelCase ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
_lowerCamelCase = VideoToVideoSDPipeline.from_pretrained('''cerspense/zeroscope_v2_XL''' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_lowerCamelCase = torch.Generator(device='''cpu''' ).manual_seed(0 )
_lowerCamelCase = torch.randn((1, 10, 3, 10_24, 5_76) , generator=a__ )
_lowerCamelCase = video.to('''cuda''' )
_lowerCamelCase = '''Spiderman is surfing'''
_lowerCamelCase = pipe(a__ , video=a__ , generator=a__ , num_inference_steps=3 , output_type='''pt''' ).frames
_lowerCamelCase = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 297
|
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
_UpperCAmelCase = logging.getLogger()
_UpperCAmelCase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __magic_name__ ( lowercase_ ):
"""simple docstring"""
def _UpperCAmelCase ( self , a__ ):
os.makedirs(a__ , exist_ok=a__ )
_lowerCamelCase = {'''source''': '''What is love ?''', '''target''': '''life'''}
_lowerCamelCase = {'''train''': 12, '''val''': 2, '''test''': 2}
for split in ["train", "test", "val"]:
for field in ["source", "target"]:
_lowerCamelCase = '''\n'''.join([contents[field]] * n_lines[split] )
with open(os.path.join(a__ , f'''{split}.{field}''' ) , '''w''' ) as f:
f.write(a__ )
def _UpperCAmelCase ( self , a__ , a__ = "pytorch" ):
_lowerCamelCase = self.get_auto_remove_tmp_dir()
_lowerCamelCase = os.path.join(a__ , '''output''' )
_lowerCamelCase = os.path.join(a__ , '''data''' )
self._create_dummy_data(data_dir=a__ )
_lowerCamelCase = f'''
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
'''.split()
if gpus > 0:
testargs.append(f'''--gpus={gpus}''' )
if is_apex_available():
testargs.append('''--fp16''' )
else:
testargs.append('''--gpus=0''' )
testargs.append('''--distributed_backend=ddp_cpu''' )
testargs.append('''--num_processes=2''' )
_lowerCamelCase = [sys.executable, str(Path(finetune_rag.__file__ ).resolve() )] + testargs
execute_subprocess_async(a__ , env=self.get_env() )
_lowerCamelCase = os.path.join(a__ , '''metrics.json''' )
with open(a__ ) as f:
_lowerCamelCase = json.load(a__ )
return result
@require_torch_gpu
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=1 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=2 )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_gpu
@require_ray
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
@require_torch_multi_gpu
@require_ray
def _UpperCAmelCase ( self ):
_lowerCamelCase = self._run_finetune(gpus=1 , distributed_retriever='''ray''' )
self.assertGreaterEqual(result['''test'''][0]['''test_avg_em'''] , 0.2 )
| 297
| 1
|
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def rename_key(old_name, num_meta4D_last_stage):
    new_name = old_name
if "patch_embed" in old_name:
A__ : Optional[int] =old_name.split('''.''' )
if layer == "0":
A__ : Any =old_name.replace('''0''', '''convolution1''' )
elif layer == "1":
A__ : List[str] =old_name.replace('''1''', '''batchnorm_before''' )
elif layer == "3":
A__ : Any =old_name.replace('''3''', '''convolution2''' )
else:
A__ : Optional[Any] =old_name.replace('''4''', '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''', lowerCamelCase_ ):
A__ : Optional[int] =R'\b\d{2}\b'
if bool(re.search(lowerCamelCase_, lowerCamelCase_ ) ):
A__ : Union[str, Any] =re.search(R'''\d\.\d\d.''', lowerCamelCase_ ).group()
else:
A__ : List[Any] =re.search(R'''\d\.\d.''', lowerCamelCase_ ).group()
if int(match[0] ) < 6:
A__ : int =old_name.replace(lowerCamelCase_, '''''' )
A__ : Dict =trimmed_name.replace('''network''', match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
A__ : List[Any] ='intermediate_stages.' + trimmed_name
else:
A__ : Any =old_name.replace(lowerCamelCase_, '''''' )
if int(match[2] ) < num_meta4D_last_stage:
A__ : str =trimmed_name.replace('''network''', '''meta4D_layers.blocks.''' + match[2] )
else:
A__ : Dict =str(int(match[2] ) - num_meta4D_last_stage )
A__ : Optional[Any] =trimmed_name.replace('''network''', '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
A__ : Optional[Any] =trimmed_name.replace('''norm1''', '''layernorm1''' )
elif "norm2" in old_name:
A__ : Optional[Any] =trimmed_name.replace('''norm2''', '''layernorm2''' )
elif "fc1" in old_name:
A__ : Optional[Any] =trimmed_name.replace('''fc1''', '''linear_in''' )
elif "fc2" in old_name:
A__ : Any =trimmed_name.replace('''fc2''', '''linear_out''' )
A__ : List[Any] ='last_stage.' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''', lowerCamelCase_ ):
A__ : str =old_name.replace('''network''', '''intermediate_stages''' )
if "fc" in new_name:
A__ : Optional[Any] =new_name.replace('''fc''', '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
A__ : List[Any] =new_name.replace('''norm1''', '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
A__ : Tuple =new_name.replace('''norm2''', '''batchnorm_after''' )
if "proj" in new_name:
A__ : Union[str, Any] =new_name.replace('''proj''', '''projection''' )
if "dist_head" in new_name:
A__ : Tuple =new_name.replace('''dist_head''', '''distillation_classifier''' )
elif "head" in new_name:
A__ : Tuple =new_name.replace('''head''', '''classifier''' )
elif "patch_embed" in new_name:
A__ : Any ='efficientformer.' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
A__ : Optional[int] =new_name.replace('''norm''', '''layernorm''' )
A__ : Union[str, Any] ='efficientformer.' + new_name
else:
A__ : Union[str, Any] ='efficientformer.encoder.' + new_name
return new_name
def convert_torch_checkpoint(checkpoint, num_meta4D_last_stage):
    for key in checkpoint.copy().keys():
        val = checkpoint.pop(key)
        checkpoint[rename_key(key, num_meta4D_last_stage)] = val
    return checkpoint
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
def convert_efficientformer_checkpoint(checkpoint_path, efficientformer_config_file, pytorch_dump_path, push_to_hub):
A__ : Dict =torch.load(lowerCamelCase_, map_location='''cpu''' )['model']
A__ : Any =EfficientFormerConfig.from_json_file(lowerCamelCase_ )
A__ : List[Any] =EfficientFormerForImageClassificationWithTeacher(lowerCamelCase_ )
A__ : Optional[int] ='_'.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
A__ : Dict =config.depths[-1] - config.num_metaad_blocks + 1
A__ : Tuple =convert_torch_checkpoint(lowerCamelCase_, lowerCamelCase_ )
model.load_state_dict(lowerCamelCase_ )
model.eval()
A__ : Optional[int] ={
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
# prepare image
A__ : List[str] =prepare_img()
A__ : List[Any] =2_5_6
A__ : Any =2_2_4
A__ : Any =EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size}, crop_size={'''height''': crop_size, '''width''': crop_size}, resample=pillow_resamplings['''bicubic'''], )
A__ : Union[str, Any] =processor(images=lowerCamelCase_, return_tensors='''pt''' ).pixel_values
# original processing pipeline
A__ : List[str] =Compose(
[
Resize(lowerCamelCase_, interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(lowerCamelCase_ ),
ToTensor(),
Normalize(lowerCamelCase_, lowerCamelCase_ ),
] )
A__ : Any =image_transforms(lowerCamelCase_ ).unsqueeze(0 )
assert torch.allclose(lowerCamelCase_, lowerCamelCase_ )
A__ : Dict =model(lowerCamelCase_ )
A__ : str =outputs.logits
A__ : Any =(1, 1_0_0_0)
if "l1" in model_name:
A__ : str =torch.Tensor(
[-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9, -0.5_1_2_4, 0.4_1_8_3, -0.6_7_9_3, -1.3_7_7_7, -0.0_8_9_3, -0.7_3_5_8, -2.4_3_2_8] )
assert torch.allclose(logits[0, :1_0], lowerCamelCase_, atol=1e-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
A__ : Optional[Any] =torch.Tensor(
[-1.3_1_5_0, -1.5_4_5_6, -1.2_5_5_6, -0.8_4_9_6, -0.7_1_2_7, -0.7_8_9_7, -0.9_7_2_8, -0.3_0_5_2, 0.3_7_5_1, -0.3_1_2_7] )
assert torch.allclose(logits[0, :1_0], lowerCamelCase_, atol=1e-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
A__ : Optional[Any] =torch.Tensor(
[-1.0_2_8_3, -1.4_1_3_1, -0.5_6_4_4, -1.3_1_1_5, -0.5_7_8_5, -1.2_0_4_9, -0.7_5_2_8, 0.1_9_9_2, -0.3_8_2_2, -0.0_8_7_8] )
assert logits.shape == expected_shape
else:
raise ValueError(
f'Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7' )
# Save Checkpoints
Path(lowerCamelCase_ ).mkdir(exist_ok=lowerCamelCase_ )
model.save_pretrained(lowerCamelCase_ )
    print(f'Checkpoint successfully converted. Model saved at {pytorch_dump_path}')
processor.save_pretrained(lowerCamelCase_ )
    print(f'Processor successfully saved at {pytorch_dump_path}')
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='''Add model''', use_temp_dir=lowerCamelCase_, )
processor.push_to_hub(
repo_id=f'Bearnardd/{pytorch_dump_path}', commit_message='''Add image processor''', use_temp_dir=lowerCamelCase_, )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path",
default=None,
type=str,
required=True,
help="Path to EfficientFormer pytorch checkpoint.",
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for EfficientFormer model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
parser.set_defaults(push_to_hub=True)
    args = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 416
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
UpperCamelCase__ : Dict = None
UpperCamelCase__ : Tuple = logging.get_logger(__name__)
UpperCamelCase__ : Union[str, Any] = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCamelCase__ : Dict = {
'''vocab_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'''
),
},
'''tokenizer_file''': {
'''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''',
'''moussaKam/barthez-orangesum-title''': (
'''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json'''
),
},
}
UpperCamelCase__ : Dict = {
'''moussaKam/mbarthez''': 10_24,
'''moussaKam/barthez''': 10_24,
'''moussaKam/barthez-orangesum-title''': 10_24,
}
UpperCamelCase__ : Any = '''▁'''
class lowerCAmelCase_ ( lowerCamelCase_ ):
__a : List[str] = VOCAB_FILES_NAMES
__a : int = PRETRAINED_VOCAB_FILES_MAP
__a : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a : List[Any] = ["input_ids", "attention_mask"]
__a : Dict = BarthezTokenizer
def __init__( self ,snake_case__=None ,snake_case__=None ,snake_case__="<s>" ,snake_case__="</s>" ,snake_case__="</s>" ,snake_case__="<s>" ,snake_case__="<unk>" ,snake_case__="<pad>" ,snake_case__="<mask>" ,**snake_case__ ,):
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE_ : str = AddedToken(snake_case__ ,lstrip=snake_case__ ,rstrip=snake_case__ ) if isinstance(snake_case__ ,snake_case__ ) else mask_token
super().__init__(
snake_case__ ,tokenizer_file=snake_case__ ,bos_token=snake_case__ ,eos_token=snake_case__ ,unk_token=snake_case__ ,sep_token=snake_case__ ,cls_token=snake_case__ ,pad_token=snake_case__ ,mask_token=snake_case__ ,**snake_case__ ,)
SCREAMING_SNAKE_CASE_ : List[Any] = vocab_file
SCREAMING_SNAKE_CASE_ : int = False if not self.vocab_file else True
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : List[str] = [self.cls_token_id]
SCREAMING_SNAKE_CASE_ : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
SCREAMING_SNAKE_CASE_ : Dict = [self.sep_token_id]
SCREAMING_SNAKE_CASE_ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case ( self ,snake_case__ ,snake_case__ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(snake_case__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
SCREAMING_SNAKE_CASE_ : Optional[Any] = os.path.join(
snake_case__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ):
copyfile(self.vocab_file ,snake_case__ )
return (out_vocab_file,)
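# For reference, the special-token layout built above follows the
# CamemBERT/RoBERTa convention:
#   single sequence: <s> A </s>
#   pair of sequences: <s> A </s> </s> B </s>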
| 105
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
"sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class UpperCamelCase_ ( A ):
'''simple docstring'''
a :Union[str, Any] = 'poolformer'
def __init__( self , _UpperCAmelCase=3 , _UpperCAmelCase=16 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=4.0 , _UpperCAmelCase=[2, 2, 6, 2] , _UpperCAmelCase=[64, 128, 320, 512] , _UpperCAmelCase=[7, 3, 3, 3] , _UpperCAmelCase=[4, 2, 2, 2] , _UpperCAmelCase=[2, 1, 1, 1] , _UpperCAmelCase=4 , _UpperCAmelCase=0.0 , _UpperCAmelCase="gelu" , _UpperCAmelCase=True , _UpperCAmelCase=1E-5 , _UpperCAmelCase=0.02 , **_UpperCAmelCase , ):
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = stride
lowerCAmelCase_ = padding
lowerCAmelCase_ = pool_size
lowerCAmelCase_ = hidden_sizes
lowerCAmelCase_ = mlp_ratio
lowerCAmelCase_ = depths
lowerCAmelCase_ = patch_sizes
lowerCAmelCase_ = strides
lowerCAmelCase_ = num_encoder_blocks
lowerCAmelCase_ = drop_path_rate
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = use_layer_scale
lowerCAmelCase_ = layer_scale_init_value
lowerCAmelCase_ = initializer_range
super().__init__(**_UpperCAmelCase)
class UpperCamelCase_ ( A ):
'''simple docstring'''
a :Optional[Any] = version.parse('1.11' )
@property
def lowercase__ ( self):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
])
@property
def lowercase__ ( self):
return 2E-3
| 413
|
import torch
def main():
    """Report how many CUDA GPUs are visible to this process."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")
if __name__ == "__main__":
main()
| 413
| 1
|
"""simple docstring"""
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        c = first & second  # carry bits
        first ^= second  # sum without the carry
        second = c << 1  # shift the carry into position
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f'''{add(first, second) = }''')
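# Worked example for add(5, 9):
#   5 = 0b0101, 9 = 0b1001 -> c = 0b0001, first = 0b1100 (12), second = 0b0010 (2)
#   next pass: c = 0b0000, first = 0b1110 (14), second = 0 -> returns 14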
| 46
|
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
    PIPELINE_INIT_ARGS, r'\n return_all_scores (`bool`, *optional*, defaults to `False`):\n Whether to return all prediction scores or just the one of the predicted class.\n function_to_apply (`str`, *optional*, defaults to `"default"`):\n The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:\n\n - `"default"`: if the model has a single label, will apply the sigmoid function on the output. If the model\n has several labels, will apply the softmax function on the output.\n - `"sigmoid"`: Applies the sigmoid function on the output.\n - `"softmax"`: Applies the softmax function on the output.\n - `"none"`: Does not apply any function on the output.\n ', )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
def __init__( self: str ,**__lowerCAmelCase: str ):
'''simple docstring'''
super().__init__(**__lowerCAmelCase )
self.check_model_type(
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING )
def _lowercase ( self: Dict ,__lowerCAmelCase: List[Any]=None ,__lowerCAmelCase: Optional[Any]=None ,__lowerCAmelCase: List[Any]="" ,**__lowerCAmelCase: List[str] ):
'''simple docstring'''
_lowerCamelCase : Optional[int] = tokenizer_kwargs
_lowerCamelCase : Optional[int] = {}
if hasattr(self.model.config ,"return_all_scores" ) and return_all_scores is None:
_lowerCamelCase : Tuple = self.model.config.return_all_scores
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) or top_k is None:
_lowerCamelCase : List[str] = top_k
_lowerCamelCase : Union[str, Any] = False
elif return_all_scores is not None:
warnings.warn(
"`return_all_scores` is now deprecated, if want a similar functionality use `top_k=None` instead of"
" `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`." ,__lowerCAmelCase ,)
if return_all_scores:
_lowerCamelCase : Optional[int] = None
else:
_lowerCamelCase : Union[str, Any] = 1
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Optional[int] = ClassificationFunction[function_to_apply.upper()]
if function_to_apply is not None:
_lowerCamelCase : Dict = function_to_apply
return preprocess_params, {}, postprocess_params
def __call__( self: int ,*__lowerCAmelCase: List[Any] ,**__lowerCAmelCase: str ):
'''simple docstring'''
_lowerCamelCase : Dict = super().__call__(*__lowerCAmelCase ,**__lowerCAmelCase )
# TODO try and retrieve it in a nicer way from _sanitize_parameters.
_lowerCamelCase : Optional[Any] = "top_k" not in kwargs
if isinstance(args[0] ,__lowerCAmelCase ) and _legacy:
# This pipeline is odd, and return a list when single item is run
return [result]
else:
return result
def _lowercase ( self: int ,__lowerCAmelCase: List[str] ,**__lowerCAmelCase: Optional[int] ):
'''simple docstring'''
_lowerCamelCase : int = self.framework
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
return self.tokenizer(**__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) == 1 and isinstance(inputs[0] ,__lowerCAmelCase ) and len(inputs[0] ) == 2:
# It used to be valid to use a list of list of list for text pairs, keeping this path for BC
return self.tokenizer(
text=inputs[0][0] ,text_pair=inputs[0][1] ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
# This is likely an invalid usage of the pipeline attempting to pass text pairs.
raise ValueError(
"The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
" dictionary `{\"text\": \"My text\", \"text_pair\": \"My pair\"}` in order to send a text pair." )
return self.tokenizer(__lowerCAmelCase ,return_tensors=__lowerCAmelCase ,**__lowerCAmelCase )
def _lowercase ( self: int ,__lowerCAmelCase: Optional[Any] ):
'''simple docstring'''
return self.model(**__lowerCAmelCase )
def _lowercase ( self: Optional[Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: int=None ,__lowerCAmelCase: str=1 ,__lowerCAmelCase: Dict=True ):
'''simple docstring'''
if function_to_apply is None:
if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
_lowerCamelCase : Dict = ClassificationFunction.SIGMOID
elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
_lowerCamelCase : List[Any] = ClassificationFunction.SOFTMAX
elif hasattr(self.model.config ,"function_to_apply" ) and function_to_apply is None:
_lowerCamelCase : Optional[int] = self.model.config.function_to_apply
else:
_lowerCamelCase : str = ClassificationFunction.NONE
_lowerCamelCase : List[Any] = model_outputs["logits"][0]
_lowerCamelCase : Optional[int] = outputs.numpy()
if function_to_apply == ClassificationFunction.SIGMOID:
_lowerCamelCase : str = sigmoid(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.SOFTMAX:
_lowerCamelCase : Optional[int] = softmax(__lowerCAmelCase )
elif function_to_apply == ClassificationFunction.NONE:
_lowerCamelCase : str = outputs
else:
raise ValueError(F"""Unrecognized `function_to_apply` argument: {function_to_apply}""" )
if top_k == 1 and _legacy:
return {"label": self.model.config.idalabel[scores.argmax().item()], "score": scores.max().item()}
_lowerCamelCase : Optional[int] = [
{"label": self.model.config.idalabel[i], "score": score.item()} for i, score in enumerate(__lowerCAmelCase )
]
if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
if top_k is not None:
_lowerCamelCase : Any = dict_scores[:top_k]
return dict_scores
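# Sketch of reaching this postprocessing through the public pipeline API
# (model selection left to the default; output values are placeholders):
#   from transformers import pipeline
#   classifier = pipeline("text-classification")
#   classifier("I love this!")               # -> [{"label": ..., "score": ...}]
#   classifier("I love this!", top_k=None)   # -> scores for every label, sorted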
| 46
| 1
|
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    """Factory used by argparse to instantiate the download command."""
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
"""simple docstring"""
@staticmethod
    def register_subcommand(parser):
        download_parser = parser.add_parser("download")
download_parser.add_argument(
"""--cache-dir""" , type=_lowerCamelCase , default=_lowerCamelCase , help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" , action="""store_true""" , help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" , action="""store_true""" , help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" , )
download_parser.add_argument("""model""" , type=_lowerCamelCase , help="""Name of the model to download""" )
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model, cache, force, trust_remote_code):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
| 385
|
'''simple docstring'''
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r'\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n'


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
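# Hedged usage sketch of the criteria above; the dummy tensors are illustrative
# stand-ins for what a decoding loop would pass at each generation step.
if __name__ == "__main__":
    criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=10), MaxTimeCriteria(max_time=5.0)])
    input_ids = torch.ones((1, 12), dtype=torch.long)  # pretend 12 tokens generated so far
    scores = torch.zeros((1, 100))                     # pretend vocabulary of 100
    print(criteria(input_ids, scores))  # True: 12 >= max_length of 10
    print(criteria.max_length)          # 10, found via the property defined above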
| 385
| 1
|
"""simple docstring"""
ENERGY_CONVERSION = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.6_0_2_1_7_6_6_3_4e-1_9,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
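    # Hedged worked examples; the expected values follow directly from the table above.
    assert energy_conversion("joule", "kilojoule", 1_000) == 1.0
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0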
| 52
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
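# Hedged usage sketch exercising the validation above; the rope_scaling values
# are illustrative, not tied to any real checkpoint.
if __name__ == "__main__":
    config = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})
    print(config.rope_scaling)  # {'type': 'linear', 'factor': 2.0}
    try:
        LlamaConfig(rope_scaling={"type": "exponential", "factor": 2.0})
    except ValueError as err:
        print(err)  # the name field must be 'linear' or 'dynamic'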
| 52
| 1
|
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any]=3_2 , lowerCAmelCase__ : Dict=1_0 , lowerCAmelCase__ : int=1_0_0 , lowerCAmelCase__ : Optional[int]=1_0_2_6 , lowerCAmelCase__ : Dict=True , lowerCAmelCase__ : Dict="data/tokenized_stories_train_wikitext103.jbl" , lowerCAmelCase__ : Optional[int]="igf_context_pairs.jbl" , ):
set_seed(3 )
# generate train_data and objective_set
__a , __a : List[str] = generate_datasets(
lowerCAmelCase__ , lowerCAmelCase__ , number=lowerCAmelCase__ , min_len=1_0_2_6 , trim=lowerCAmelCase__ )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
__a : Dict = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
# load pretrained model
    __a : Optional[int] = load_gpt2('''gpt2''' ).to(lowerCAmelCase__ )
print('''computing perplexity on objective set''' )
__a : List[str] = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).item()
print('''perplexity on objective set:''' , lowerCAmelCase__ )
# collect igf pairs and save to file demo.jbl
collect_objective_set(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def __UpperCamelCase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[Any]=1_5 , lowerCAmelCase__ : Any=1_2_8 , lowerCAmelCase__ : Dict=1_0_0 , lowerCAmelCase__ : Any="igf_model.pt" , ):
set_seed(4_2 )
# Load pre-trained model
    __a : Dict = GPT2LMHeadModel.from_pretrained('''gpt2''' )
# Initialize secondary learner to use embedding weights of model
__a : int = SecondaryLearner(lowerCAmelCase__ )
# Train secondary learner
__a : int = train_secondary_learner(
lowerCAmelCase__ , lowerCAmelCase__ , max_epochs=lowerCAmelCase__ , batch_size=lowerCAmelCase__ , eval_freq=1_0_0 , igf_model_path=lowerCAmelCase__ , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def __UpperCamelCase ( lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any]=3_2 , lowerCAmelCase__ : Union[str, Any]=1_0_0_0 , lowerCAmelCase__ : List[Any]=1_6 , lowerCAmelCase__ : Any=1.0 , lowerCAmelCase__ : List[str]=recopy_gpt2 , lowerCAmelCase__ : Any=None , lowerCAmelCase__ : Union[str, Any]=1_0 , lowerCAmelCase__ : Any="gpt2_finetuned.pt" , ):
__a : Optional[int] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' )
__a : int = RandomSampler(lowerCAmelCase__ )
__a : Union[str, Any] = DataLoader(lowerCAmelCase__ , sampler=lowerCAmelCase__ )
__a : Optional[int] = max_steps // (len(lowerCAmelCase__ )) + 1
__a : Optional[int] = 0
__a : Optional[int] = torch.zeros((1, context_len) , dtype=torch.long , device=lowerCAmelCase__ )
__a , __a , __a : Optional[Any] = recopy_model(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
model.train()
if secondary_learner is not None:
secondary_learner.to(lowerCAmelCase__ )
secondary_learner.eval()
__a : Union[str, Any] = []
__a : int = 0
__a : Optional[Any] = []
__a : int = []
# Compute the performance of the transformer model at the beginning
__a : int = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
test_perps.append(lowerCAmelCase__ )
print('''Test perplexity, step''' , lowerCAmelCase__ , ''':''' , lowerCAmelCase__ )
for epoch in range(int(lowerCAmelCase__ ) ):
for step, example in enumerate(lowerCAmelCase__ ):
torch.cuda.empty_cache()
__a : int = random.randint(0 , example.size(2 ) - context_len - 1 )
__a : Union[str, Any] = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
__a : int = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
__a : int = True
if secondary_learner is not None:
__a : Dict = secondary_learner.forward(
torch.tensor(lowerCAmelCase__ , dtype=torch.long , device=lowerCAmelCase__ ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(lowerCAmelCase__ ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
__a : List[str] = -1
if predicted_q < threshold:
__a : int = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
__a : Tuple = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
__a : Tuple = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
__a : int = compute_perplexity(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
test_perps.append(lowerCAmelCase__ )
print('''Test perplexity, step''' , lowerCAmelCase__ , ''':''' , lowerCAmelCase__ )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , lowerCAmelCase__ )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def __UpperCamelCase ( ):
__a : str = argparse.ArgumentParser(description='''Fine-tune a transformer model with IGF on a language modeling task''' )
# Required parameters
parser.add_argument(
'''--data_dir''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''The input data dir. Should contain data files for WikiText.''' , )
parser.add_argument(
'''--model_name_or_path''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , )
parser.add_argument(
'''--data_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help=(
'''A jbl file containing tokenized data which can be split as objective dataset, '''
'''train_dataset and test_dataset.'''
) , )
parser.add_argument(
'''--igf_data_file''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''A jbl file containing the context and information gain pairs to train secondary learner.''' , )
parser.add_argument(
'''--output_dir''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , required=lowerCAmelCase__ , help='''The output directory where the final fine-tuned model is stored.''' , )
parser.add_argument(
'''--tokenizer_name''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Pretrained tokenizer name or path if not the same as model_name''' , )
parser.add_argument('''--seed''' , type=lowerCAmelCase__ , default=lowerCAmelCase__ , help='''A seed for reproducible training.''' )
parser.add_argument(
'''--context_len''' , default=3_2 , type=lowerCAmelCase__ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--size_objective_set''' , default=1_0_0 , type=lowerCAmelCase__ , help='''number of articles that are long enough to be used as our objective set''' , )
parser.add_argument(
'''--eval_freq''' , default=1_0_0 , type=lowerCAmelCase__ , help='''secondary model evaluation is triggered at eval_freq''' )
parser.add_argument('''--max_steps''' , default=1_0_0_0 , type=lowerCAmelCase__ , help='''To calculate training epochs''' )
parser.add_argument(
'''--secondary_learner_batch_size''' , default=1_2_8 , type=lowerCAmelCase__ , help='''batch size of training data for secondary learner''' , )
parser.add_argument(
'''--batch_size''' , default=1_6 , type=lowerCAmelCase__ , help='''batch size of training data of language model(gpt2) ''' )
parser.add_argument(
'''--eval_interval''' , default=1_0 , type=lowerCAmelCase__ , help=(
'''decay the selectivity of our secondary learner filter from'''
'''1 standard deviation above average to 1 below average after 10 batches'''
) , )
parser.add_argument(
'''--number''' , default=1_0_0 , type=lowerCAmelCase__ , help='''The number of examples split to be used as objective_set/test_data''' )
parser.add_argument(
'''--min_len''' , default=1_0_2_6 , type=lowerCAmelCase__ , help='''The minimum length of the article to be used as objective set''' )
parser.add_argument(
'''--secondary_learner_max_epochs''' , default=1_5 , type=lowerCAmelCase__ , help='''number of epochs to train secondary learner''' )
parser.add_argument('''--trim''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''truncate the example if it exceeds context length''' )
parser.add_argument(
'''--threshold''' , default=1.0 , type=lowerCAmelCase__ , help=(
'''The threshold value used by secondary learner to filter the train_data and allow only'''
''' informative data as input to the model'''
) , )
parser.add_argument('''--finetuned_model_name''' , default='''gpt2_finetuned.pt''' , type=lowerCAmelCase__ , help='''finetuned_model_name''' )
parser.add_argument(
'''--recopy_model''' , default=lowerCAmelCase__ , type=lowerCAmelCase__ , help='''Reset the model to the original pretrained GPT-2 weights after each iteration''' , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=lowerCAmelCase__ , data_file='''data/tokenized_stories_train_wikitext103.jbl''' , igf_data_file='''igf_context_pairs.jbl''' , )
# Load train data for secondary learner
__a : Any = joblib.load('''data/IGF_values.jbl''' )
# Train secondary learner
__a : List[Any] = training_secondary_learner(
lowerCAmelCase__ , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path='''igf_model.pt''' , )
# load pretrained gpt2 model
    __a : Optional[int] = GPT2LMHeadModel.from_pretrained('''gpt2''' )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
__a , __a : str = generate_datasets(
context_len=3_2 , file='''data/tokenized_stories_train_wikitext103.jbl''' , number=1_0_0 , min_len=1_0_2_6 , trim=lowerCAmelCase__ )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=lowerCAmelCase__ , secondary_learner=lowerCAmelCase__ , eval_interval=1_0 , finetuned_model_name='''gpt2_finetuned.pt''' , )
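# Hedged sketch of the "decaying selectivity" rule described in the comments of
# the training loop above: start one standard deviation above the mean predicted
# IG and decay toward one below over the first 10 batches. The loop itself uses
# a cruder rule (the threshold simply drops after step 10); this is an illustration.
def decayed_threshold(observed_qs, step, decay_steps=10):
    mu, sigma = float(np.mean(observed_qs)), float(np.std(observed_qs))
    frac = min(step / decay_steps, 1.0)      # 0 -> 1 over the first decay_steps
    return mu + (1.0 - 2.0 * frac) * sigma   # +1 sigma down to -1 sigma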
if __name__ == "__main__":
main()
| 706
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
lowercase__ ='true'
def __UpperCamelCase ( lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Union[str, Any]=8_2 , lowerCAmelCase__ : List[str]=1_6 ):
set_seed(4_2 )
__a : Dict = RegressionModel()
__a : str = deepcopy(lowerCAmelCase__ )
__a : List[Any] = RegressionDataset(length=lowerCAmelCase__ )
__a : Tuple = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__ )
model.to(accelerator.device )
__a , __a : Any = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
return model, ddp_model, dataloader
def __UpperCamelCase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : int=False ):
__a : Union[str, Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/mrpc-bert-base-cased''' )
__a : Any = load_dataset('''glue''' , '''mrpc''' , split='''validation''' )
def tokenize_function(lowerCAmelCase__ : Optional[int] ):
__a : Union[str, Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ )
return outputs
with accelerator.main_process_first():
__a : Any = dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
__a : Optional[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(lowerCAmelCase__ : Dict ):
if use_longest:
return tokenizer.pad(lowerCAmelCase__ , padding='''longest''' , return_tensors='''pt''' )
return tokenizer.pad(lowerCAmelCase__ , padding='''max_length''' , max_length=1_2_8 , return_tensors='''pt''' )
return DataLoader(lowerCAmelCase__ , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1_6 )
def __UpperCamelCase ( lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str ):
__a : Any = Accelerator(dispatch_batches=lowerCAmelCase__ , split_batches=lowerCAmelCase__ )
__a : List[str] = get_dataloader(lowerCAmelCase__ , not dispatch_batches )
__a : Optional[int] = AutoModelForSequenceClassification.from_pretrained(
'''hf-internal-testing/mrpc-bert-base-cased''' , return_dict=lowerCAmelCase__ )
__a , __a : Tuple = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __UpperCamelCase ( lowerCAmelCase__ : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Dict ):
__a : List[Any] = []
for batch in dataloader:
__a , __a : Optional[int] = batch.values()
with torch.no_grad():
__a : Dict = model(lowerCAmelCase__ )
__a , __a : str = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
__a , __a : str = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCAmelCase__ )
targs.append(lowerCAmelCase__ )
__a , __a : Tuple = torch.cat(lowerCAmelCase__ ), torch.cat(lowerCAmelCase__ )
return logits, targs
def __UpperCamelCase ( lowerCAmelCase__ : Accelerator , lowerCAmelCase__ : List[Any]=8_2 , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Optional[int]=False , lowerCAmelCase__ : Any=1_6 ):
__a , __a , __a : List[Any] = get_basic_setup(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
__a , __a : str = generate_predictions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
assert (
len(lowerCAmelCase__ ) == num_samples
), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCAmelCase__ )}"
def __UpperCamelCase ( lowerCAmelCase__ : bool = False , lowerCAmelCase__ : bool = False ):
__a : List[Any] = evaluate.load('''glue''' , '''mrpc''' )
__a , __a : Optional[int] = get_mrpc_setup(lowerCAmelCase__ , lowerCAmelCase__ )
# First do baseline
__a , __a , __a : Any = setup['''no''']
model.to(lowerCAmelCase__ )
model.eval()
for batch in dataloader:
batch.to(lowerCAmelCase__ )
with torch.inference_mode():
__a : Union[str, Any] = model(**lowerCAmelCase__ )
__a : Tuple = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCAmelCase__ , references=batch['''labels'''] )
__a : List[Any] = metric.compute()
# Then do distributed
__a , __a , __a : Any = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
__a : Optional[int] = model(**lowerCAmelCase__ )
__a : Optional[Any] = outputs.logits.argmax(dim=-1 )
__a : Union[str, Any] = batch['''labels''']
__a , __a : Dict = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCAmelCase__ , references=lowerCAmelCase__ )
__a : Optional[Any] = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def __UpperCamelCase ( ):
__a : Optional[Any] = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('''**Testing gather_for_metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" )
test_mrpc(lowerCAmelCase__ , lowerCAmelCase__ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test torch metrics**''' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__a : List[Any] = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__ )
if accelerator.is_local_main_process:
print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" )
test_torch_metrics(lowerCAmelCase__ , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('''**Test last batch is not dropped when perfectly divisible**''' )
__a : Dict = Accelerator()
test_torch_metrics(lowerCAmelCase__ , 5_1_2 )
accelerator.state._reset_state()
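# Hedged sketch of what `gather_for_metrics` guards against: with uneven data
# across processes, the sampler pads the last batch, so a naive all-gather
# over-counts samples. Truncating the gathered rows back to the true dataset
# length restores exact metrics; the numbers below are illustrative.
def truncate_gathered(gathered_rows, true_length):
    # drop the duplicated padding rows that a naive gather would include
    return gathered_rows[:true_length]


# e.g. 99 samples on 2 processes gather to 100 rows -> truncate_gathered(rows, 99)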
def __UpperCamelCase ( lowerCAmelCase__ : int ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 326
| 0
|
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
__UpperCamelCase : List[str] = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
__UpperCamelCase : List[str] = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
__UpperCamelCase : Dict = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
__UpperCamelCase : str = sorted(arg_to_scheduler.keys())
__UpperCamelCase : Dict = """{""" + """, """.join(arg_to_scheduler_choices) + """}"""
class __SCREAMING_SNAKE_CASE( pl.LightningModule ):
def __init__( self: str , UpperCamelCase: argparse.Namespace , UpperCamelCase: str=None , UpperCamelCase: str="base" , UpperCamelCase: Tuple=None , UpperCamelCase: str=None , UpperCamelCase: str=None , **UpperCamelCase: Optional[Any] , ) -> Any:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase )
snake_case__ = 0
snake_case__ = Path(self.hparams.output_dir )
snake_case__ = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
snake_case__ = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase , **UpperCamelCase , )
else:
snake_case__ = config
snake_case__ = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase , UpperCamelCase ):
assert hasattr(self.config , UpperCamelCase ), F'''model config doesn\'t have a `{p}` attribute'''
setattr(self.config , UpperCamelCase , getattr(self.hparams , UpperCamelCase ) )
if tokenizer is None:
snake_case__ = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase , )
else:
snake_case__ = tokenizer
snake_case__ = MODEL_MODES[mode]
if model is None:
snake_case__ = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase , )
else:
snake_case__ = model
def lowerCAmelCase_ ( self: Tuple , *UpperCamelCase: Tuple , **UpperCamelCase: int ) -> List[Any]:
snake_case__ = self.model_type.from_pretrained(*UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] ) -> Dict:
snake_case__ = arg_to_scheduler[self.hparams.lr_scheduler]
snake_case__ = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
snake_case__ = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Tuple:
snake_case__ = self.model
snake_case__ = ['bias', 'LayerNorm.weight']
snake_case__ = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
snake_case__ = Adafactor(
UpperCamelCase , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase , relative_step=UpperCamelCase )
else:
snake_case__ = AdamW(
UpperCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
snake_case__ = optimizer
snake_case__ = self.get_lr_scheduler()
return [optimizer], [scheduler]
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Union[str, Any] , UpperCamelCase: List[str] ) -> Tuple:
return self.validation_step(UpperCamelCase , UpperCamelCase )
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: Dict ) -> Any:
return self.validation_end(UpperCamelCase )
def lowerCAmelCase_ ( self: Any ) -> int:
snake_case__ = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
snake_case__ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCAmelCase_ ( self: Optional[int] , UpperCamelCase: Any ) -> List[str]:
if stage == "test":
snake_case__ = len(self.test_dataloader().dataset )
else:
snake_case__ = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=UpperCamelCase )
snake_case__ = len(self.train_dataloader().dataset )
def lowerCAmelCase_ ( self: Any , UpperCamelCase: str , UpperCamelCase: int , UpperCamelCase: bool = False ) -> Any:
raise NotImplementedError('You must implement this for your task' )
def lowerCAmelCase_ ( self: Any ) -> str:
return self.train_loader
def lowerCAmelCase_ ( self: str ) -> Optional[Any]:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=UpperCamelCase )
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[int]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=UpperCamelCase )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: Optional[Any] ) -> Optional[int]:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
UpperCamelCase , list(filter(UpperCamelCase , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def lowerCAmelCase_ ( self: Any , UpperCamelCase: Dict[str, Any] ) -> None:
snake_case__ = self.output_dir.joinpath('best_tfmr' )
snake_case__ = self.step_count
self.model.save_pretrained(UpperCamelCase )
self.tokenizer.save_pretrained(UpperCamelCase )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Tuple , UpperCamelCase: Optional[int] ) -> Any:
parser.add_argument(
'--model_name_or_path' , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=UpperCamelCase , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=UpperCamelCase , type=UpperCamelCase , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(UpperCamelCase ).parent / 'test_run' / 'cache' ) , type=UpperCamelCase , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=UpperCamelCase , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=UpperCamelCase , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=UpperCamelCase , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=UpperCamelCase , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5e-5 , type=UpperCamelCase , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=UpperCamelCase , metavar=UpperCamelCase , type=UpperCamelCase , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=UpperCamelCase , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1e-8 , type=UpperCamelCase , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=UpperCamelCase , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=UpperCamelCase , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=UpperCamelCase )
parser.add_argument('--train_batch_size' , default=32 , type=UpperCamelCase )
parser.add_argument('--eval_batch_size' , default=32 , type=UpperCamelCase )
parser.add_argument('--adafactor' , action='store_true' )
class __SCREAMING_SNAKE_CASE( pl.Callback ):
def lowerCAmelCase_ ( self: str , UpperCamelCase: List[str] , UpperCamelCase: str ) -> Union[str, Any]:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __SCREAMING_SNAKE_CASE( pl.Callback ):
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: Any , UpperCamelCase: Any ) -> Dict:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(UpperCamelCase )
class __SCREAMING_SNAKE_CASE( pl.Callback ):
def lowerCAmelCase_ ( self: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Any ) -> Union[str, Any]:
snake_case__ = trainer.lr_schedulers[0]['scheduler']
snake_case__ = {F'''lr_group_{i}''': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(UpperCamelCase )
def lowerCAmelCase_ ( self: Tuple , UpperCamelCase: pl.Trainer , UpperCamelCase: pl.LightningModule ) -> Union[str, Any]:
rank_zero_info('***** Validation results *****' )
snake_case__ = trainer.callback_metrics
# Log results
for key in sorted(UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(UpperCamelCase , str(metrics[key] ) ) )
def lowerCAmelCase_ ( self: int , UpperCamelCase: pl.Trainer , UpperCamelCase: pl.LightningModule ) -> str:
rank_zero_info('***** Test results *****' )
snake_case__ = trainer.callback_metrics
# Log and save results to file
snake_case__ = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(UpperCamelCase , 'w' ) as writer:
for key in sorted(UpperCamelCase ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(UpperCamelCase , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(UpperCamelCase , str(metrics[key] ) ) )
def a_ ( _A , _A ) -> None:
"""simple docstring"""
# To allow all pl args uncomment the following line
# parser = pl.Trainer.add_argparse_args(parser)
parser.add_argument(
'--output_dir' , default=str(Path(_A ).parent / 'test_run' / 'model_checkpoints' ) , type=_A , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_A , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=_A )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=_A , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=_A , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=_A , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(_A ).parent / 'test_run' / 'dummy-train-data' ) , type=_A , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def a_ ( _A , _A , _A=None , _A=True , _A=[] , _A=None , _A=None , **_A , ) -> Union[str, Any]:
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
snake_case__ = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_A )
# add custom checkpoints
if checkpoint_callback is None:
snake_case__ = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_A )
if logging_callback is None:
snake_case__ = LoggingCallback()
snake_case__ = {}
    if args.fp16:
snake_case__ = 16
if args.gpus > 1:
snake_case__ = 'auto'
snake_case__ = 'ddp'
snake_case__ = args.accumulate_grad_batches
snake_case__ = None
snake_case__ = 'auto'
snake_case__ = pl.Trainer.from_argparse_args(
_A , weights_summary=_A , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_A , val_check_interval=1 , num_sanity_val_steps=2 , **_A , )
if args.do_train:
trainer.fit(_A )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
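# Hedged usage sketch. By their argparse contents, the obfuscated definitions
# above correspond to `add_generic_args` (the module-level function adding
# --output_dir/--fp16/...), `BaseTransformer.add_model_specific_args` (the
# @staticmethod), and `generic_train` (the trainer-building function) from the
# original lightning_base module; those names are assumptions used here only
# for readability, so the sketch stays commented out.
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     parser = BaseTransformer.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args(["--model_name_or_path", "bert-base-cased", "--do_train"])
#     model = MyTaskModule(args)            # hypothetical BaseTransformer subclass
#     trainer = generic_train(model, args)  # fits if --do_train was passed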
| 328
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
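# Hedged usage sketch of the tool above; the silent audio buffer is a
# placeholder so the example stays self-contained (a real call would pass a
# waveform loaded from disk). Instantiating the tool downloads openai/whisper-base.
if __name__ == "__main__":
    import numpy as np

    tool = SpeechToTextTool()
    audio = np.zeros(16_000, dtype=np.float32)  # one second of 16 kHz "silence"
    print(tool(audio))  # encode -> forward -> decode, returns the transcription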
| 328
| 1
|
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=13 , UpperCAmelCase : str=7 , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=99 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : List[Any]=64 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : int=2 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : str=1 , ) -> List[str]:
'''simple docstring'''
lowercase : Union[str, Any] =parent
lowercase : int =batch_size
lowercase : str =seq_length
lowercase : Tuple =is_training
lowercase : List[str] =use_input_mask
lowercase : Optional[int] =use_token_type_ids
lowercase : Any =use_labels
lowercase : List[Any] =vocab_size
lowercase : str =hidden_size
lowercase : str =num_hidden_layers
lowercase : List[str] =num_attention_heads
lowercase : int =intermediate_size
lowercase : Optional[int] =hidden_act
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Union[str, Any] =max_position_embeddings
lowercase : Optional[int] =type_vocab_size
lowercase : Dict =type_sequence_label_size
lowercase : Optional[int] =initializer_range
lowercase : List[Any] =num_labels
lowercase : Any =num_choices
lowercase : str =scope
lowercase : Any =q_groups
lowercase : Any =k_groups
lowercase : Union[str, Any] =v_groups
lowercase : int =post_attention_groups
lowercase : str =intermediate_groups
lowercase : Union[str, Any] =output_groups
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : Tuple =None
if self.use_labels:
lowercase : str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : int =ids_tensor([self.batch_size] , self.num_choices )
lowercase : str =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def A__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =SqueezeBertModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : List[str] =model(UpperCAmelCase , UpperCAmelCase )
lowercase : Any =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] =SqueezeBertForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Any =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any:
'''simple docstring'''
lowercase : Any =SqueezeBertForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : List[str] =self.num_labels
lowercase : Dict =SqueezeBertForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Union[str, Any] =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.num_labels
lowercase : Dict =SqueezeBertForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
lowercase : Optional[Any] =self.num_choices
lowercase : Union[str, Any] =SqueezeBertForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : List[str] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = True
UpperCamelCase_ = False
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =SqueezeBertModelTester(self )
lowercase : Dict =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCAmelCase )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCAmelCase )
@slow
def A__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] =SqueezeBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : int =SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
lowercase : str =torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
lowercase : Any =model(UpperCAmelCase )[0]
lowercase : Tuple =torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : int =torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-4 ) )
| 702
|
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
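# Hedged usage sketch of the special-token helpers above; the token ids 5-8 are
# arbitrary placeholders (from_pretrained fetches the real vocab from the Hub).
if __name__ == "__main__":
    tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
    print(tokenizer.build_inputs_with_special_tokens([5, 6], [7, 8]))
    # -> [cls_id, 5, 6, sep_id, 7, 8, sep_id]
    print(tokenizer.create_token_type_ids_from_sequences([5, 6], [7, 8]))
    # -> [0, 0, 0, 0, 1, 1, 1]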
| 8
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from typing import List
from unittest.mock import Mock
import torch
from torch.utils.data import DataLoader, IterableDataset, TensorDataset
from accelerate.accelerator import Accelerator
from accelerate.utils.dataclasses import DistributedType
class DummyIterableDataset(IterableDataset):
    def __init__(self, data):
        self.data = data

    def __iter__(self):
        for element in self.data:
            yield element
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Tuple=True ):
"""simple docstring"""
a_ : Dict = Accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
assert accelerator.num_processes == 2, "this script expects that two GPUs are available"
return accelerator
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : bool = False ):
"""simple docstring"""
if iterable:
a_ : Union[str, Any] = DummyIterableDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE_ ) ) )
else:
a_ : Any = TensorDataset(torch.as_tensor(range(SCREAMING_SNAKE_CASE_ ) ) )
a_ : List[str] = DataLoader(SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
a_ : str = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
return dl
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : List[int] , SCREAMING_SNAKE_CASE_ : List[int] , ):
"""simple docstring"""
a_ : int = create_dataloader(accelerator=SCREAMING_SNAKE_CASE_ , dataset_size=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
a_ : Optional[int] = [len(batch[0] ) for batch in dl]
if accelerator.process_index == 0:
assert batch_sizes == process_0_expected_batch_sizes
elif accelerator.process_index == 1:
assert batch_sizes == process_1_expected_batch_sizes
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : List[Any] = create_accelerator()
# without padding, we would expect a different number of batches
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1, 1] , )
# without padding, we would expect the same number of batches, but different sizes
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 2] , )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : int = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , process_0_expected_batch_sizes=[1, 1] , process_1_expected_batch_sizes=[1] , )
verify_dataloader_batch_sizes(
SCREAMING_SNAKE_CASE_ , dataset_size=7 , batch_size=2 , process_0_expected_batch_sizes=[2, 2] , process_1_expected_batch_sizes=[2, 1] , )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : Optional[Any] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
a_ : Tuple = torch.nn.Linear(1 , 1 )
a_ : Optional[int] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
a_ : int = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
a_ : str = []
with accelerator.join_uneven_inputs([ddp_model] ):
for batch_idx, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
a_ : str = ddp_model(batch[0].float() )
a_ : Any = output.sum()
loss.backward()
batch_idxs.append(SCREAMING_SNAKE_CASE_ )
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
assert batch_idxs == [0, 1]
elif accelerator.process_index == 1:
assert batch_idxs == [0]
def _lowerCamelCase ( SCREAMING_SNAKE_CASE_ : List[str] ):
"""simple docstring"""
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as w:
with accelerator.join_uneven_inputs([Mock()] ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE_ )
assert "only supported for multi-GPU" in str(w[-1].message )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : Dict = True
a_ : List[Any] = False
a_ : Union[str, Any] = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
a_ : Any = torch.nn.Linear(1 , 1 )
a_ : Optional[int] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
a_ : Union[str, Any] = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
a_ : int = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
a_ : str = train_dl.batch_sampler.even_batches
a_ : Union[str, Any] = valid_dl.batch_sampler.even_batches
assert train_dl_overridden_value == overridden_even_batches
assert valid_dl_overridden_value == overridden_even_batches
assert train_dl.batch_sampler.even_batches == default_even_batches
assert valid_dl.batch_sampler.even_batches == default_even_batches
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : Union[str, Any] = True
a_ : List[str] = False
a_ : Any = create_accelerator(even_batches=SCREAMING_SNAKE_CASE_ )
a_ : Tuple = torch.nn.Linear(1 , 1 )
a_ : str = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE_ )
a_ : List[Any] = create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 )
with warnings.catch_warnings():
warnings.filterwarnings("""ignore""" )
try:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
a_ : Optional[int] = batch_dl.batch_sampler.even_batches
except AttributeError:
# ensure attribute error is not raised when processing iterable dl
raise AssertionError
assert batch_dl_overridden_value == overridden_even_batches
assert batch_dl.batch_sampler.even_batches == default_even_batches
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : Tuple = create_accelerator()
a_ : Tuple = torch.nn.Linear(1 , 1 )
a_ : List[Any] = accelerator.prepare(SCREAMING_SNAKE_CASE_ )
create_dataloader(SCREAMING_SNAKE_CASE_ , dataset_size=3 , batch_size=1 , iterable=SCREAMING_SNAKE_CASE_ )
with warnings.catch_warnings(record=SCREAMING_SNAKE_CASE_ ) as w:
with accelerator.join_uneven_inputs([ddp_model] , even_batches=SCREAMING_SNAKE_CASE_ ):
pass
assert issubclass(w[-1].category , SCREAMING_SNAKE_CASE_ )
assert "only supported for map-style datasets" in str(w[-1].message )
def _lowerCamelCase ( ):
"""simple docstring"""
a_ : str = create_accelerator()
accelerator.print("""Test that even_batches variable ensures uniform batches across processes""" )
test_default_ensures_even_batch_sizes()
accelerator.print("""Run tests with even_batches disabled""" )
test_can_disable_even_batches()
accelerator.print("""Test joining uneven inputs""" )
test_can_join_uneven_inputs()
accelerator.print("""Test overriding even_batches when joining uneven inputs""" )
test_join_can_override_even_batches()
accelerator.print("""Test overriding even_batches for mixed dataloader types""" )
test_join_can_override_for_mixed_type_dataloaders()
accelerator.print("""Test overriding even_batches raises a warning for iterable dataloaders""" )
test_join_raises_warning_for_iterable_when_overriding_even_batches()
accelerator.print("""Test join with non DDP distributed raises warning""" )
a_ : Optional[Any] = accelerator.state.distributed_type
a_ : Tuple = DistributedType.FSDP
test_join_raises_warning_for_non_ddp_distributed(SCREAMING_SNAKE_CASE_ )
a_ : Optional[Any] = original_state
if __name__ == "__main__":
main()
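# Usage sketch (added; not part of the original script). This file expects a
# 2-process launcher; the exact command and the script name are assumptions:
#
#   accelerate launch --num_processes 2 test_distributed_data_loop.py
#
# With even_batches=True and dataset_size=3 on 2 processes, the shorter process
# is padded with a duplicated sample so both see batch sizes [1, 1]; with
# even_batches=False, process 1 sees only [1].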
| 419
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers
        # This tells us, every how many encoder layers we have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers
        # This tells us, every how many decoder layers we have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef
        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, **kwargs
        )
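# Minimal sketch (added; not in the original module): how the sparse-step
# bookkeeping above plays out for the default 12-layer / 3-sparse-layer setup.
#
#   config = SwitchTransformersConfig()
#   assert config.encoder_sparse_step == 12 // 3 == 4  # every 4th encoder layer is sparse
#   assert config.decoder_sparse_step == 4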
| 419
| 1
|
from math import sqrt
def is_prime(number: int) -> bool:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and positive"
    status = True
    # 0 and 1 are not primes.
    if number <= 1:
        status = False
    for divisor in range(2, int(round(sqrt(number))) + 1):
        # if 'number' is divisible by 'divisor' then set 'status'
        # to False and break out of the loop.
        if number % divisor == 0:
            status = False
            break
    # precondition
    assert isinstance(status, bool), "'status' must be of type bool"
    return status
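# Worked examples (added; not part of the original module):
#   is_prime(2)  -> True
#   is_prime(91) -> False  (91 == 7 * 13; the loop finds divisor 7 <= sqrt(91) ~ 9.54)
#   is_prime(1)  -> False  (handled by the `number <= 1` branch)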
def sieve_er(n: int) -> list:
    assert isinstance(n, int) and n > 2, "'n' must be an int and > 2"
    # begin_list: contains all natural numbers from 2 up to n
    begin_list = list(range(2, n + 1))
    ans = []  # this list will be returned.
    # actual sieve of Eratosthenes
    for i in range(len(begin_list)):
        for j in range(i + 1, len(begin_list)):
            if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
                begin_list[j] = 0
    # filter the actual prime numbers.
    ans = [x for x in begin_list if x != 0]
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def get_prime_numbers(n: int) -> list:
    assert isinstance(n, int) and n > 2, "'n' must be an int and > 2"
    ans = []
    # iterates over all numbers between 2 up to n+1;
    # if a number is prime then it is appended to the list 'ans'
    for number in range(2, n + 1):
        if is_prime(number):
            ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def prime_factorization(number: int) -> list:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = []  # this list will be returned by the function.
    # potential prime number factors.
    factor = 2
    quotient = number
    if number in (0, 1):
        ans.append(number)
    # if 'number' is not prime then build the prime factorization of 'number'
    elif not is_prime(number):
        while quotient != 1:
            if is_prime(factor) and quotient % factor == 0:
                ans.append(factor)
                quotient //= factor
            else:
                factor += 1
    else:
        ans.append(number)
    # precondition
    assert isinstance(ans, list), "'ans' must be of type list"
    return ans


def greatest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = max(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def smallest_prime_factor(number: int) -> int:
    assert isinstance(number, int) and number >= 0, "'number' must be an int and >= 0"
    ans = 0
    # prime factorization of 'number'
    prime_factors = prime_factorization(number)
    ans = min(prime_factors)
    # precondition
    assert isinstance(ans, int), "'ans' must be of type int"
    return ans


def is_even(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 == 0, bool), "comparison must be of type bool"
    return number % 2 == 0


def is_odd(number: int) -> bool:
    assert isinstance(number, int), "'number' must be an int"
    assert isinstance(number % 2 != 0, bool), "comparison must be of type bool"
    return number % 2 != 0


def goldbach(number: int) -> list:
    assert isinstance(number, int) and number > 2 and is_even(number), "'number' must be an int, even and > 2"
    ans = []  # this list will be returned
    # creates a list of prime numbers between 2 up to 'number'
    prime_numbers = get_prime_numbers(number)
    len_pn = len(prime_numbers)
    # run variables for the while-loops.
    i = 0
    j = None
    # exit variable, to break out of the loops
    loop = True
    while i < len_pn and loop:
        j = i + 1
        while j < len_pn and loop:
            if prime_numbers[i] + prime_numbers[j] == number:
                loop = False
                ans.append(prime_numbers[i])
                ans.append(prime_numbers[j])
            j += 1
        i += 1
    # precondition
    assert (
        isinstance(ans, list)
        and len(ans) == 2
        and ans[0] + ans[1] == number
        and is_prime(ans[0])
        and is_prime(ans[1])
    ), "'ans' must contain two primes, and the sum of its elements must equal 'number'"
    return ans


def gcd(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 0)
        and (number2 >= 0)
    ), "'number1' and 'number2' must be positive integers."
    rest = 0
    while number2 != 0:
        rest = number1 % number2
        number1 = number2
        number2 = rest
    # precondition
    assert isinstance(number1, int) and number1 >= 0, "'number1' must be of type int and positive"
    return number1
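# Worked example for the Euclidean algorithm above (added; not original):
#   gcd(12, 18): 12 % 18 = 12 -> (18, 12); 18 % 12 = 6 -> (12, 6);
#   12 % 6 = 0 -> (6, 0); the loop ends and 6 is returned.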
def kg_v(number1: int, number2: int) -> int:
    assert (
        isinstance(number1, int)
        and isinstance(number2, int)
        and (number1 >= 1)
        and (number2 >= 1)
    ), "'number1' and 'number2' must be positive integers."
    ans = 1  # actual answer that will be returned.
    # for kgV(x, 1)
    if number1 > 1 and number2 > 1:
        # builds the prime factorization of 'number1' and 'number2'
        prime_fac_1 = prime_factorization(number1)
        prime_fac_2 = prime_factorization(number2)
    elif number1 == 1 or number2 == 1:
        prime_fac_1 = []
        prime_fac_2 = []
        ans = max(number1, number2)
    count1 = 0
    count2 = 0
    done = []  # captures numbers that occur in both 'prime_fac_1' and 'prime_fac_2'
    # iterates through prime_fac_1
    for n in prime_fac_1:
        if n not in done:
            if n in prime_fac_2:
                count1 = prime_fac_1.count(n)
                count2 = prime_fac_2.count(n)
                for _ in range(max(count1, count2)):
                    ans *= n
            else:
                count1 = prime_fac_1.count(n)
                for _ in range(count1):
                    ans *= n
            done.append(n)
    # iterates through prime_fac_2
    for n in prime_fac_2:
        if n not in done:
            count2 = prime_fac_2.count(n)
            for _ in range(count2):
                ans *= n
            done.append(n)
    # precondition
    assert isinstance(ans, int) and ans >= 0, "'ans' must be of type int and positive"
    return ans


def get_prime(n: int) -> int:
    assert isinstance(n, int) and n >= 0, "'n' must be a positive int"
    index = 0
    ans = 2  # this variable holds the answer
    while index < n:
        index += 1
        ans += 1  # counts to the next number
        # if ans is not prime then
        # run to the next prime number.
        while not is_prime(ans):
            ans += 1
    # precondition
    assert isinstance(ans, int) and is_prime(ans), "'ans' must be a prime number and of type int"
    return ans


def get_primes_between(p_number_1: int, p_number_2: int) -> list:
    assert (
        is_prime(p_number_1) and is_prime(p_number_2) and (p_number_1 < p_number_2)
    ), "The arguments must be prime numbers and 'p_number_1' < 'p_number_2'"
    number = p_number_1 + 1  # jump to the next number
    ans = []  # this list will be returned.
    # if number is not prime then
    # fetch the next prime number.
    while not is_prime(number):
        number += 1
    while number < p_number_2:
        ans.append(number)
        number += 1
        # fetch the next prime number.
        while not is_prime(number):
            number += 1
    # precondition
    assert (
        isinstance(ans, list) and ans[0] != p_number_1 and ans[len(ans) - 1] != p_number_2
    ), "'ans' must be a list without the arguments"
    # 'ans' contains neither 'p_number_1' nor 'p_number_2'!
    return ans


def get_divisors(n: int) -> list:
    assert isinstance(n, int) and n >= 1, "'n' must be an int and >= 1"
    ans = []  # will be returned.
    for divisor in range(1, n + 1):
        if n % divisor == 0:
            ans.append(divisor)
    # precondition
    assert ans[0] == 1 and ans[len(ans) - 1] == n, "Error in function get_divisors(...)"
    return ans


def is_perfect_number(number: int) -> bool:
    assert isinstance(number, int) and number > 1, "'number' must be an int and > 1"
    divisors = get_divisors(number)
    # precondition
    assert (
        isinstance(divisors, list)
        and (divisors[0] == 1)
        and (divisors[len(divisors) - 1] == number)
    ), "Error in help-function get_divisors(...)"
    # sum all divisors up to 'number' (exclusive), hence [:-1]
    return sum(divisors[:-1]) == number


def simplify_fraction(numerator: int, denominator: int) -> tuple:
    assert (
        isinstance(numerator, int)
        and isinstance(denominator, int)
        and (denominator != 0)
    ), "The arguments must be of type int and 'denominator' != 0"
    # build the greatest common divisor of numerator and denominator.
    gcd_of_fraction = gcd(abs(numerator), abs(denominator))
    # precondition
    assert (
        isinstance(gcd_of_fraction, int)
        and (numerator % gcd_of_fraction == 0)
        and (denominator % gcd_of_fraction == 0)
    ), "Error in function gcd(...)"
    return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)


def factorial(n: int) -> int:
    assert isinstance(n, int) and n >= 0, "'n' must be an int and >= 0"
    ans = 1  # this will be returned.
    for factor in range(1, n + 1):
        ans *= factor
    return ans


def fib(n: int) -> int:
    assert isinstance(n, int) and n >= 0, "'n' must be an int and >= 0"
    tmp = 0
    fib_1 = 1
    ans = 1  # this will be returned
    for _ in range(n - 1):
        tmp = ans
        ans += fib_1
        fib_1 = tmp
    return ans
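# Cross-check sketch (added; not part of the original module): for positive
# integers the identity gcd(a, b) * lcm(a, b) == a * b must hold, which ties
# the two functions above together.
def _check_gcd_lcm_identity(a: int = 12, b: int = 18) -> bool:
    return gcd(a, b) * kg_v(a, b) == a * b  # 6 * 36 == 216 == 12 * 18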
| 138
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class StableDiffusionLDM3DPipelineFastTests(unittest.TestCase):
    pipeline_class = StableDiffusionLDM3DPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=6,
            out_channels=6,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])
        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2

    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_1, depth_slice_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_slice_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_slice_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = ldm3d_pipe.tokenizer(
            prompt,
            padding="max_length",
            max_length=ldm3d_pipe.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = ldm3d_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldm3d_pipe(**inputs)
        rgb_slice_2, depth_slice_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_slice_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_slice_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4

    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldm3d_pipe = StableDiffusionLDM3DPipeline(**components)
        ldm3d_pipe = ldm3d_pipe.to(device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldm3d_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]
        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)
        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])
        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2


@slow
@require_torch_gpu
class StableDiffusionLDM3DPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d_stable_diffusion(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d")
        ldm3d_pipe = ldm3d_pipe.to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = rgb[0, -3:, -1].flatten()
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)
        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3


@nightly
@require_torch_gpu
class StableDiffusionLDM3DPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm3d(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3

    def test_ldm3d_v2(self):
        ldm3d_pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldm3d_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        output = ldm3d_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103
        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
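# Hedged usage sketch (added; not part of the test file): running the LDM3D
# pipeline end to end. The model id and output fields follow the tests above;
# device availability is an assumption.
#
#   from diffusers import StableDiffusionLDM3DPipeline
#   pipe = StableDiffusionLDM3DPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#   out = pipe("a photograph of an astronaut riding a horse", output_type="numpy")
#   rgb, depth = out.rgb, out.depth  # rgb: (1, 512, 512, 3), depth: (1, 512, 512)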
| 138
| 1
|
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"
    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)


# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
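# Expected output layout (added note, inferred directly from the loop above):
#   model_cards/facebook/wmt19-ru-en/README.md
#   model_cards/facebook/wmt19-en-ru/README.md
#   model_cards/facebook/wmt19-en-de/README.md
#   model_cards/facebook/wmt19-de-en/README.md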
| 86
|
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the n-digit positive integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(F'''{solution(10, 22) = }''')
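# Worked example (added): 9**21 has exactly 21 digits, so it is counted, while
# 10**n always has n + 1 digits, which is why `bases` stops at 9.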
| 386
| 0
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config

    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))
                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)
            seq_length = self.model_tester.seq_length
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)


def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)


@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
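# Worked arithmetic for the tester defaults above (added comment):
# image_size=10, patch_size=2 -> (10 // 2) ** 2 = 25 patches per frame;
# num_frames=2 -> seq_length = 2 * 25 + 1 = 51 tokens (including the CLS token).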
| 719
|
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class __lowercase (nn.Module ):
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = 0.0
_UpperCamelCase = 1
_UpperCamelCase = 1
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = jnp.floataa
def UpperCamelCase__ ( self ) ->Optional[Any]:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = []
__lowerCAmelCase : int = []
for i in range(self.num_layers ):
__lowerCAmelCase : str = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase : Any = FlaxResnetBlockaD(
in_channels=A_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A_ )
__lowerCAmelCase : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(A_ )
__lowerCAmelCase : int = resnets
__lowerCAmelCase : Any = attentions
if self.add_downsample:
__lowerCAmelCase : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A_ , A_ , A_ , A_=True ) ->int:
'''simple docstring'''
__lowerCAmelCase : Union[str, Any] = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCAmelCase : Optional[int] = resnet(A_ , A_ , deterministic=A_ )
__lowerCAmelCase : Union[str, Any] = attn(A_ , A_ , deterministic=A_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase : Any = self.downsamplers_a(A_ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowercase (nn.Module ):
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = 0.0
_UpperCamelCase = 1
_UpperCamelCase = True
_UpperCamelCase = jnp.floataa
def UpperCamelCase__ ( self ) ->Dict:
'''simple docstring'''
__lowerCAmelCase : Optional[Any] = []
for i in range(self.num_layers ):
__lowerCAmelCase : str = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase : Any = FlaxResnetBlockaD(
in_channels=A_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(A_ )
__lowerCAmelCase : List[str] = resnets
if self.add_downsample:
__lowerCAmelCase : List[str] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , A_ , A_ , A_=True ) ->str:
'''simple docstring'''
__lowerCAmelCase : List[str] = ()
for resnet in self.resnets:
__lowerCAmelCase : Optional[int] = resnet(A_ , A_ , deterministic=A_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase : Dict = self.downsamplers_a(A_ )
output_states += (hidden_states,)
return hidden_states, output_states
class __lowercase (nn.Module ):
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = 42
_UpperCamelCase = 0.0
_UpperCamelCase = 1
_UpperCamelCase = 1
_UpperCamelCase = True
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = False
_UpperCamelCase = jnp.floataa
def UpperCamelCase__ ( self ) ->Optional[int]:
'''simple docstring'''
__lowerCAmelCase : Any = []
__lowerCAmelCase : List[Any] = []
for i in range(self.num_layers ):
__lowerCAmelCase : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCAmelCase : List[str] = self.prev_output_channel if i == 0 else self.out_channels
            resnet = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(resnet )
            attn_block = FlaxTransformeraDModel(
                in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
        self.resnets = resnets
        self.attentions = attentions
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , encoder_hidden_states , deterministic=True ):
        '''simple docstring'''
        for resnet, attn in zip(self.resnets , self.attentions ):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUpBlockaD(nn.Module ):
    in_channels: int
    prev_output_channel: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        resnets = []
        for i in range(self.num_layers ):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels
            resnet = FlaxResnetBlockaD(
                in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(resnet )
        self.resnets = resnets
        if self.add_upsample:
            self.upsamplers_a = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
    def __call__( self , hidden_states , res_hidden_states_tuple , temb , deterministic=True ):
        '''simple docstring'''
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        if self.add_upsample:
            hidden_states = self.upsamplers_a(hidden_states )
        return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module ):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32
    def setup( self ):
        '''simple docstring'''
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
        ]
        attentions = []
        for _ in range(self.num_layers ):
            attn_block = FlaxTransformeraDModel(
                in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
            attentions.append(attn_block )
            res_block = FlaxResnetBlockaD(
                in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
            resnets.append(res_block )
        self.resnets = resnets
        self.attentions = attentions
    def __call__( self , hidden_states , temb , encoder_hidden_states , deterministic=True ):
        '''simple docstring'''
        hidden_states = self.resnets[0](hidden_states , temb )
        for attn, resnet in zip(self.attentions , self.resnets[1:] ):
            hidden_states = attn(hidden_states , encoder_hidden_states , deterministic=deterministic )
            hidden_states = resnet(hidden_states , temb , deterministic=deterministic )
        return hidden_states
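# Usage sketch for the mid block above — a minimal smoke test. The shapes, the
# jax init/apply calls, and running this file standalone are assumptions for
# illustration, not part of the original module:
#     import jax
#     block = FlaxUNetMidBlockaDCrossAttn(in_channels=32, num_attention_heads=4)
#     sample = jnp.zeros((1, 8, 8, 32))   # NHWC, matching the axis=-1 concatenation above
#     temb = jnp.zeros((1, 128))          # time embedding
#     context = jnp.zeros((1, 77, 32))    # encoder hidden states
#     params = block.init(jax.random.PRNGKey(0), sample, temb, context)
#     out = block.apply(params, sample, temb, context)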
| 583
| 0
|
'''simple docstring'''
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    """simple docstring"""
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        ' https://api.github.com/repos/huggingface/transformers/actions/runners'
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE )
    o = output.stdout.decode('utf-8' )
    status = json.loads(o )
    runners = status['runners']
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner )
    # save the result so we can report them on Slack
    with open('offline_runners.txt', 'w' ) as fp:
        fp.write(json.dumps(offline_runners ) )
    if len(offline_runners ) > 0:
        failed = '\n'.join([x['name'] for x in offline_runners] )
        raise ValueError(f"""The following runners are offline:\n{failed}""" )
if __name__ == "__main__":
    def list_str(values):
        """simple docstring"""
        return values.split(',' )
lowercase__ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--target_runners',
default=None,
type=list_str,
required=True,
help='Comma-separated list of runners to check status.',
)
parser.add_argument(
'--token', default=None, type=str, required=True, help='A token that has actions:read permission.'
)
lowercase__ : Union[str, Any] = parser.parse_args()
get_runner_status(args.target_runners, args.token)
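# Usage sketch (the script filename, runner names, and token are placeholders):
#     python check_runner_status.py --target_runners runner-1,runner-2 --token <GH_TOKEN>
# The script writes any offline runners to offline_runners.txt and raises if the list is non-empty.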
| 98
|
'''simple docstring'''
def excel_title_to_column(column_title: str ) -> int:
    """simple docstring"""
    assert column_title.isupper()
    answer = 0
    index = len(column_title ) - 1
    power = 0
    while index >= 0:
        value = (ord(column_title[index] ) - 64) * pow(26, power )
        answer += value
        power += 1
        index -= 1
    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
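# Quick sanity checks for excel_title_to_column, worked out by hand
# ("A" -> 1, "Z" -> 26, "AA" -> 26 * 1 + 1 = 27):
if __name__ == "__main__":
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("Z") == 26
    assert excel_title_to_column("AA") == 27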
| 98
| 1
|
"""simple docstring"""
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
"debug": logging.DEBUG,
"info": logging.INFO,
"warning": logging.WARNING,
"error": logging.ERROR,
"critical": logging.CRITICAL,
}
_default_log_level = logging.WARNING
def _get_default_logging_level():
    '''simple docstring'''
    env_level_str = os.getenv("""DATASETS_VERBOSITY""" , None )
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                F'Unknown option DATASETS_VERBOSITY={env_level_str}, '
                F'has to be one of: { ", ".join(log_levels.keys() ) }' )
    return _default_log_level
def _get_library_name() -> str:
    '''simple docstring'''
    return __name__.split(""".""" )[0]
def _get_library_root_logger() -> logging.Logger:
    '''simple docstring'''
    return logging.getLogger(_get_library_name() )
def _configure_library_root_logger() -> None:
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level() )
def _reset_library_root_logger() -> None:
    '''simple docstring'''
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET )
def get_logger(name: Optional[str] = None ) -> logging.Logger:
    '''simple docstring'''
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name )
def get_verbosity() -> int:
    '''simple docstring'''
    return _get_library_root_logger().getEffectiveLevel()
def set_verbosity(verbosity: int ) -> None:
    '''simple docstring'''
    _get_library_root_logger().setLevel(verbosity )
def set_verbosity_info():
    '''simple docstring'''
    return set_verbosity(INFO )
def set_verbosity_warning():
    '''simple docstring'''
    return set_verbosity(WARNING )
def set_verbosity_debug():
    '''simple docstring'''
    return set_verbosity(DEBUG )
def set_verbosity_error():
    '''simple docstring'''
    return set_verbosity(ERROR )
def disable_propagation() -> None:
    '''simple docstring'''
    _get_library_root_logger().propagate = False
def enable_propagation() -> None:
    '''simple docstring'''
    _get_library_root_logger().propagate = True
# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()
class EmptyTqdm:
    def __init__( self , *args , **kwargs ):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None
    def __iter__( self ):
        return iter(self._iterator )
    def __getattr__( self , attr ):
        def empty_fn(*args , **kwargs ):  # pylint: disable=unused-argument
            return
        return empty_fn
    def __enter__( self ):
        return self
    def __exit__( self , exc_type , exc_value , traceback ):
        return
_tqdm_active = True
class _tqdm_cls:
    def __call__( self , *args , disable=False , **kwargs ):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args , **kwargs )
        else:
            return EmptyTqdm(*args , **kwargs )
    def set_lock( self , *args , **kwargs ):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args , **kwargs )
    def get_lock( self ):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()
tqdm = _tqdm_cls()
def is_progress_bar_enabled() -> bool:
    '''simple docstring'''
    global _tqdm_active
    return bool(_tqdm_active )
def enable_progress_bar():
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = True
def disable_progress_bar():
    '''simple docstring'''
    global _tqdm_active
    _tqdm_active = False
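# Minimal usage sketch for the helpers above (exercises only names defined here):
if __name__ == "__main__":
    set_verbosity_info()
    get_logger(__name__).info("effective verbosity: %s", get_verbosity())
    disable_progress_bar()
    assert not is_progress_bar_enabled()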
| 393
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class lowercase ( unittest.TestCase ):
def _snake_case ( self ) -> int:
lowerCAmelCase = tempfile.mkdtemp()
lowerCAmelCase = BlipImageProcessor()
lowerCAmelCase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
lowerCAmelCase = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
lowerCAmelCase = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _snake_case ( self , **lowercase ) -> Dict:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _snake_case ( self , **lowercase ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _snake_case ( self , **lowercase ) -> Any:
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _snake_case ( self ) -> str:
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCAmelCase = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ) -> Any:
lowerCAmelCase = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
lowerCAmelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCAmelCase = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
lowerCAmelCase = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_qformer_tokenizer()
lowerCAmelCase = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = image_processor(lowercase , return_tensors="""np""" )
lowerCAmelCase = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_qformer_tokenizer()
lowerCAmelCase = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = processor(text=lowercase )
lowerCAmelCase = tokenizer(lowercase , return_token_type_ids=lowercase )
lowerCAmelCase = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _snake_case ( self ) -> Any:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_qformer_tokenizer()
lowerCAmelCase = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _snake_case ( self ) -> str:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_qformer_tokenizer()
lowerCAmelCase = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
lowerCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCAmelCase = processor.batch_decode(lowercase )
lowerCAmelCase = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _snake_case ( self ) -> int:
lowerCAmelCase = self.get_image_processor()
lowerCAmelCase = self.get_tokenizer()
lowerCAmelCase = self.get_qformer_tokenizer()
lowerCAmelCase = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
lowerCAmelCase = """lower newer"""
lowerCAmelCase = self.prepare_image_inputs()
lowerCAmelCase = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
| 393
| 1
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'albert-base-v1': 'https://huggingface.co/albert-base-v1/resolve/main/config.json',
'albert-large-v1': 'https://huggingface.co/albert-large-v1/resolve/main/config.json',
'albert-xlarge-v1': 'https://huggingface.co/albert-xlarge-v1/resolve/main/config.json',
'albert-xxlarge-v1': 'https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json',
'albert-base-v2': 'https://huggingface.co/albert-base-v2/resolve/main/config.json',
'albert-large-v2': 'https://huggingface.co/albert-large-v2/resolve/main/config.json',
'albert-xlarge-v2': 'https://huggingface.co/albert-xlarge-v2/resolve/main/config.json',
'albert-xxlarge-v2': 'https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json',
}
class AlbertConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = '''albert'''
    def __init__( self , vocab_size=30_000 , embedding_size=128 , hidden_size=4_096 , num_hidden_layers=12 , num_hidden_groups=1 , num_attention_heads=64 , intermediate_size=16_384 , inner_group_num=1 , hidden_act="gelu_new" , hidden_dropout_prob=0 , attention_probs_dropout_prob=0 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , classifier_dropout_prob=0.1 , position_embedding_type="absolute" , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.inner_group_num = inner_group_num
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout_prob = classifier_dropout_prob
        self.position_embedding_type = position_embedding_type
class AlbertOnnxConfig(OnnxConfig ):
    """simple docstring"""
    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
        else:
            dynamic_axis = {0: '''batch''', 1: '''sequence'''}
        return OrderedDict(
            [
                ('''input_ids''', dynamic_axis),
                ('''attention_mask''', dynamic_axis),
                ('''token_type_ids''', dynamic_axis),
            ] )
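# Usage sketch: the defaults above describe the xxlarge variant; smaller variants
# just shrink these sizes (the base-sized numbers below are an assumption):
if __name__ == "__main__":
    config = AlbertConfig(hidden_size=768, num_attention_heads=12, intermediate_size=3_072)
    print(config.model_type, config.hidden_size)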
| 30
|
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
default_cache_path = HUGGINGFACE_HUB_CACHE
CONFIG_NAME = """config.json"""
WEIGHTS_NAME = """diffusion_pytorch_model.bin"""
FLAX_WEIGHTS_NAME = """diffusion_flax_model.msgpack"""
ONNX_WEIGHTS_NAME = """model.onnx"""
SAFETENSORS_WEIGHTS_NAME = """diffusion_pytorch_model.safetensors"""
ONNX_EXTERNAL_WEIGHTS_NAME = """weights.pb"""
HUGGINGFACE_CO_RESOLVE_ENDPOINT = """https://huggingface.co"""
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = """diffusers_modules"""
HF_MODULES_CACHE = os.getenv("""HF_MODULES_CACHE""", os.path.join(hf_cache_home, """modules"""))
DEPRECATED_REVISION_ARGS = ["""fp16""", """non-ema"""]
TEXT_ENCODER_ATTN_MODULE = """.self_attn"""
| 569
| 0
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
UpperCamelCase__ : Optional[Any] =ViTImageProcessor if is_vision_available() else None
@property
def __a ( self :str) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def __a ( self :str) -> Tuple:
UpperCAmelCase_ = (3, 32, 128)
UpperCAmelCase_ = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z''']
# fmt: on
UpperCAmelCase_ = dict(zip(_lowercase , range(len(_lowercase))))
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''])
with open(self.vocab_file , '''w''' , encoding='''utf-8''') as fp:
fp.write(json.dumps(_lowercase) + '''\n''')
UpperCAmelCase_ = {
'''do_normalize''': False,
'''do_resize''': True,
'''image_processor_type''': '''ViTImageProcessor''',
'''resample''': 3,
'''size''': {'''height''': 32, '''width''': 128},
}
UpperCAmelCase_ = os.path.join(self.tmpdirname , _lowercase)
with open(self.image_processor_file , '''w''' , encoding='''utf-8''') as fp:
json.dump(_lowercase , _lowercase)
def __a ( self :int , **_lowercase :Optional[int]) -> Union[str, Any]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_lowercase)
def __a ( self :Union[str, Any] , **_lowercase :Dict) -> Tuple:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_lowercase)
def __a ( self :Optional[Any]) -> Any:
shutil.rmtree(self.tmpdirname)
def __a ( self :Optional[Any]) -> Tuple:
UpperCAmelCase_ = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta)
UpperCAmelCase_ = Image.fromarray(np.moveaxis(_lowercase , 0 , -1))
return image_input
def __a ( self :Union[str, Any]) -> Optional[int]:
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=_lowercase)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __a ( self :str) -> int:
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
processor.save_pretrained(self.tmpdirname)
UpperCAmelCase_ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''')
UpperCAmelCase_ = self.get_image_processor(do_normalize=_lowercase , padding_value=1.0)
UpperCAmelCase_ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_lowercase , padding_value=1.0)
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.char_tokenizer , _lowercase)
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor , _lowercase)
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = image_processor(_lowercase , return_tensors='''np''')
UpperCAmelCase_ = processor(images=_lowercase , return_tensors='''np''')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2)
def __a ( self :List[Any]) -> Tuple:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
UpperCAmelCase_ = '''test'''
UpperCAmelCase_ = processor(text=_lowercase)
UpperCAmelCase_ = tokenizer(_lowercase)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key])
def __a ( self :List[Any]) -> str:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
UpperCAmelCase_ = '''test'''
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , ['''pixel_values''', '''labels'''])
# test if it raises when no input is passed
with pytest.raises(_lowercase):
processor()
def __a ( self :Optional[Any]) -> Optional[int]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
UpperCAmelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ = processor.char_decode(_lowercase)
UpperCAmelCase_ = tokenizer.batch_decode(_lowercase)
UpperCAmelCase_ = [seq.replace(''' ''' , '''''') for seq in decoded_tok]
self.assertListEqual(_lowercase , _lowercase)
def __a ( self :Optional[int]) -> Optional[int]:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
UpperCAmelCase_ = None
UpperCAmelCase_ = self.prepare_image_inputs()
UpperCAmelCase_ = processor(text=_lowercase , images=_lowercase)
self.assertListEqual(list(inputs.keys()) , processor.model_input_names)
def __a ( self :Optional[int]) -> str:
UpperCAmelCase_ = self.get_image_processor()
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = MgpstrProcessor(tokenizer=_lowercase , image_processor=_lowercase)
UpperCAmelCase_ = torch.randn(1 , 27 , 38)
UpperCAmelCase_ = torch.randn(1 , 27 , 50257)
UpperCAmelCase_ = torch.randn(1 , 27 , 30522)
UpperCAmelCase_ = processor.batch_decode([char_input, bpe_input, wp_input])
self.assertListEqual(list(results.keys()) , ['''generated_text''', '''scores''', '''char_preds''', '''bpe_preds''', '''wp_preds'''])
| 707
|
def find_minimum_change(denominations , value ) -> list[int]:
    '''simple docstring'''
    total_value = int(value )
    # Initialize Result
    answer = []
    # Traverse through all denomination
    for denomination in reversed(denominations ):
        # Find denominations
        while int(total_value ) >= int(denomination ):
            total_value -= int(denomination )
            answer.append(denomination )  # Append the "answers" array
    return answer
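# Worked example: find_minimum_change([1, 2, 5, 10], "27") walks the denominations
# from largest to smallest and returns [10, 10, 5, 2], i.e. 27 = 10 + 10 + 5 + 2.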
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = "0"
if (
input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
== "y"
):
UpperCamelCase_ = int(input("Enter the number of denominations you want to add: ").strip())
for i in range(0, n):
denominations.append(int(input(f"Denomination {i}: ").strip()))
UpperCamelCase_ = input("Enter the change you want to make in Indian Currency: ").strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
UpperCamelCase_ = input("Enter the change you want to make: ").strip()
if int(value) == 0 or int(value) < 0:
print("The total value cannot be zero or negative.")
else:
print(f"Following is minimal change for {value}: ")
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=" ")
| 561
| 0
|
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float ):
    if successes > trials:
        raise ValueError('''successes must be lower or equal to trials''' )
    if trials < 0 or successes < 0:
        raise ValueError('''the function is defined for non-negative integers''' )
    if not isinstance(successes, int ) or not isinstance(trials, int ):
        raise ValueError('''the function is defined for non-negative integers''' )
    if not 0 < prob < 1:
        raise ValueError('''prob has to be in range of 1 - 0''' )
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials ) )
    coefficient /= factorial(successes ) * factorial(trials - successes )
    return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('''Probability of 2 successes out of 4 trails''')
print('''with probability of 0.75 is:''', end=''' ''')
print(binomial_distribution(2, 4, 0.75))
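# Hand check of the value printed above:
# C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375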
| 467
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class ASTFeatureExtractor( metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
class SpeechaTextFeatureExtractor( metaclass=DummyObject ):
    _backends = ["speech"]
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''speech'''] )
| 467
| 1
|
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int ) ->int:
    total = 0
    for i in range(1 , int(sqrt(n ) + 1 ) ):
        if n % i == 0 and i != sqrt(n ):
            total += i + n // i
        elif i == sqrt(n ):
            total += i
    return total - n
def solution(limit: int = 1_0000 ) ->int:
    total = sum(
        i
        for i in range(1 , limit )
        if sum_of_divisors(sum_of_divisors(i ) ) == i and sum_of_divisors(i ) != i )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
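# Example: 220 and 284 form an amicable pair (sum_of_divisors(220) == 284 and
# sum_of_divisors(284) == 220), so both are counted by solution(10000).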
| 44
|
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class MidiProcessor( metaclass=DummyObject ):
    _backends = ['''transformers''', '''torch''', '''note_seq''']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_config( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
    @classmethod
    def from_pretrained( cls , *args , **kwargs ):
        requires_backends(cls , ['''transformers''', '''torch''', '''note_seq'''] )
| 44
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_plbart''': ['''PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PLBartConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_plbart'''] = ['''PLBartTokenizer''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_plbart'''] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
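# Usage sketch: _LazyModule defers the heavy submodule imports until first
# attribute access, so (assuming this file is installed as
# transformers/models/plbart/__init__.py)
#     from transformers.models.plbart import PLBartModel
# only triggers the modeling_plbart import at that point.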
| 395
|
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
authorized_types = ['''text''', '''image''', '''audio''']
def create_inputs(input_types: List[str] ):
    """simple docstring"""
    inputs = []
    for input_type in input_types:
        if input_type == "text":
            inputs.append("Text input" )
        elif input_type == "image":
            inputs.append(
                Image.open(Path(get_tests_dir("fixtures/tests_samples/COCO" ) ) / "000000039769.png" ).resize((512, 512) ) )
        elif input_type == "audio":
            inputs.append(torch.ones(3_000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
        else:
            raise ValueError(F'''Invalid type requested: {input_type}''' )
    return inputs
def output_types(outputs: List ):
    """simple docstring"""
    output_types = []
    for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append("text" )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append("image" )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
            output_types.append("audio" )
        else:
            raise ValueError(F'''Invalid output: {output}''' )
    return output_types
@is_tool_test
class ToolTesterMixin:
    def test_inputs_outputs( self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , "inputs" ) )
        self.assertTrue(hasattr(self.tool , "outputs" ) )
        inputs = self.tool.inputs
        for _input in inputs:
            if isinstance(_input , list ):
                for __input in _input:
                    self.assertTrue(__input in authorized_types )
            else:
                self.assertTrue(_input in authorized_types )
        outputs = self.tool.outputs
        for _output in outputs:
            self.assertTrue(_output in authorized_types )
    def test_call( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        # There is a single output
        if len(self.tool.outputs ) == 1:
            outputs = [outputs]
        self.assertListEqual(output_types(outputs ) , self.tool.outputs )
    def test_common_attributes( self ):
        """simple docstring"""
        self.assertTrue(hasattr(self.tool , "description" ) )
        self.assertTrue(hasattr(self.tool , "default_checkpoint" ) )
        self.assertTrue(self.tool.description.startswith("This is a tool that" ) )
    def test_agent_types_outputs( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        outputs = self.tool(*inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
        for output, output_type in zip(outputs , self.tool.outputs ):
            agent_type = AGENT_TYPE_MAPPING[output_type]
            self.assertTrue(isinstance(output , agent_type ) )
    def test_agent_types_inputs( self ):
        """simple docstring"""
        inputs = create_inputs(self.tool.inputs )
        _inputs = []
        for _input, input_type in zip(inputs , self.tool.inputs ):
            if isinstance(input_type , list ):
                _inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
            else:
                _inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
        # Should not raise an error
        outputs = self.tool(*_inputs )
        if not isinstance(outputs , list ):
            outputs = [outputs]
        self.assertEqual(len(outputs ) , len(self.tool.outputs ) )
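# Hypothetical concrete test case built on the mixin above (the "translation"
# tool name and load_tool usage are illustrative, not part of this file):
#     class TranslationToolTester(ToolTesterMixin, unittest.TestCase):
#         def setUp(self):
#             from transformers import load_tool
#             self.tool = load_tool("translation")
#             self.tool.setup()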
| 395
| 1
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_albert""": ["""ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """AlbertConfig""", """AlbertOnnxConfig"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_albert"""] = ["""AlbertTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_albert_fast"""] = ["""AlbertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_albert"""] = [
"""ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AlbertForMaskedLM""",
"""AlbertForMultipleChoice""",
"""AlbertForPreTraining""",
"""AlbertForQuestionAnswering""",
"""AlbertForSequenceClassification""",
"""AlbertForTokenClassification""",
"""AlbertModel""",
"""AlbertPreTrainedModel""",
"""load_tf_weights_in_albert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_albert"""] = [
"""TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAlbertForMaskedLM""",
"""TFAlbertForMultipleChoice""",
"""TFAlbertForPreTraining""",
"""TFAlbertForQuestionAnswering""",
"""TFAlbertForSequenceClassification""",
"""TFAlbertForTokenClassification""",
"""TFAlbertMainLayer""",
"""TFAlbertModel""",
"""TFAlbertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_albert"""] = [
"""FlaxAlbertForMaskedLM""",
"""FlaxAlbertForMultipleChoice""",
"""FlaxAlbertForPreTraining""",
"""FlaxAlbertForQuestionAnswering""",
"""FlaxAlbertForSequenceClassification""",
"""FlaxAlbertForTokenClassification""",
"""FlaxAlbertModel""",
"""FlaxAlbertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 703
|
import math
import sys
import cva
import numpy as np
def vec_gaussian(img: np.ndarray , variance: float ) -> np.ndarray:
    """simple docstring"""
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance )
    cons = 1 / (sigma * math.sqrt(2 * math.pi ))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
def get_slice(img: np.ndarray , x: int , y: int , kernel_size: int ) -> np.ndarray:
    """simple docstring"""
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]
def get_gauss_kernel(kernel_size: int , spatial_variance: float ) -> np.ndarray:
    """simple docstring"""
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size) )
    for i in range(0 , kernel_size ):
        for j in range(0 , kernel_size ):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
    return vec_gaussian(arr , spatial_variance )
def bilateral_filter(img: np.ndarray , spatial_variance: float , intensity_variance: float , kernel_size: int , ) -> np.ndarray:
    """simple docstring"""
    imga = np.zeros(img.shape )
    gauss_ker = get_gauss_kernel(kernel_size , spatial_variance )
    size_x, size_y = img.shape
    for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
        for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
            img_s = get_slice(img , i , j , kernel_size )
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i , intensity_variance )
            weights = np.multiply(gauss_ker , img_ig )
            vals = np.multiply(img_s , weights )
            val = np.sum(vals ) / np.sum(weights )
            imga[i, j] = val
    return imga
def parse_args(args: list ) -> tuple:
    """simple docstring"""
    filename = args[1] if args[1:] else '''../image_data/lena.jpg'''
    spatial_variance = float(args[2] ) if args[2:] else 1.0
    intensity_variance = float(args[3] ) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4] )
        kernel_size = kernel_size + abs(kernel_size % 2 - 1 )
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size
if __name__ == "__main__":
lowercase_ , lowercase_ , lowercase_ , lowercase_ = parse_args(sys.argv)
lowercase_ = cva.imread(filename, 0)
cva.imshow("""input image""", img)
lowercase_ = img / 255
lowercase_ = out.astype("""float32""")
lowercase_ = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
lowercase_ = out * 255
lowercase_ = np.uinta(out)
cva.imshow("""output image""", out)
cva.waitKey(0)
cva.destroyAllWindows()
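# Worked example for the kernel-size rule in parse_args above: a CLI value of 6
# becomes 6 + abs(6 % 2 - 1) = 7, while 5 stays 5 + abs(5 % 2 - 1) = 5; the
# window is forced to be odd so the filter has a well-defined center pixel.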
| 131
| 0
|
import argparse
from collections import defaultdict
def overwrite_file(file , class_name , test_name , correct_line , done_test ):
    _id = F"""{file}_{class_name}_{test_name}"""
    done_test[_id] += 1
    with open(file, 'r' ) as f:
        lines = f.readlines()
    class_regex = F"""class {class_name}("""
    test_regex = F"""{4 * ' '}def {test_name}("""
    line_begin_regex = F"""{8 * ' '}{correct_line.split()[0]}"""
    another_line_begin_regex = F"""{16 * ' '}{correct_line.split()[0]}"""
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex ):
            in_class = True
        elif in_class and line.startswith(test_regex ):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex ) or line.startswith(another_line_begin_regex )):
            spaces = len(line.split(correct_line.split()[0] )[0] )
            count += 1
            if count == done_test[_id]:
                in_line = True
        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True
        if in_class and in_func and in_line and insert_line:
            new_lines.append(F"""{spaces * ' '}{correct_line}""" )
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line )
    with open(file, 'w' ) as f:
        for line in new_lines:
            f.write(line )
def main(correct , fail=None ):
    if fail is not None:
        with open(fail, 'r' ) as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, 'r' ) as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int )
    for line in correct_lines:
        file , class_name , test_name , correct_line = line.split(';' )
        if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests )
if __name__ == "__main__":
_snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
_snake_case : str = parser.parse_args()
main(args.correct_filename, args.fail_filename)
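# Usage sketch: each line of --correct_filename is "file;class;test;correct_line",
# e.g. (the concrete path, class, and script name are illustrative)
#     tests/models/x/test_modeling_x.py;XModelTest;test_logits;expected_slice = torch.tensor([...])
#     python overwrite_expected_values.py --correct_filename correct_lines.txt --fail_filename failures.txt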
| 53
|
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_snake_case : List[Any] = logging.get_logger(__name__)
class _UpperCAmelCase ( BaseImageProcessor ):
    """simple docstring"""
    model_input_names = ["""pixel_values"""]
def __init__( self : Optional[int] , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Dict[str, int] = None , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Union[int, float] = 1 / 2_5_5 , lowerCAmelCase_ : bool = True , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **lowerCAmelCase_ : Any , ) -> None:
super().__init__(**lowerCAmelCase_ )
__lowerCAmelCase = size if size is not None else {'shortest_edge': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = do_resize
__lowerCAmelCase = size
__lowerCAmelCase = resample
__lowerCAmelCase = do_center_crop
__lowerCAmelCase = crop_size
__lowerCAmelCase = do_rescale
__lowerCAmelCase = rescale_factor
__lowerCAmelCase = do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : Optional[int] , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__lowerCAmelCase = int((2_5_6 / 2_2_4) * size['shortest_edge'] )
__lowerCAmelCase = get_resize_output_image_size(lowerCAmelCase_ , size=lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
lowerCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : str , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Dict[str, int] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : str , ) -> np.ndarray:
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(lowerCAmelCase_ , size=(size['height'], size['width']) , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Dict , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[int, float] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : int , ) -> np.ndarray:
return rescale(lowerCAmelCase_ , scale=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : int , lowerCAmelCase_ : np.ndarray , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Union[float, List[float]] , lowerCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase_ : List[str] , ) -> np.ndarray:
return normalize(lowerCAmelCase_ , mean=lowerCAmelCase_ , std=lowerCAmelCase_ , data_format=lowerCAmelCase_ , **lowerCAmelCase_ )
def lowercase ( self : Optional[Any] , lowerCAmelCase_ : ImageInput , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : PILImageResampling = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Dict[str, int]] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[float] = None , lowerCAmelCase_ : Optional[bool] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , lowerCAmelCase_ : Optional[TensorType] = None , lowerCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase_ : str , ) -> BatchFeature:
__lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
__lowerCAmelCase = resample if resample is not None else self.resample
__lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
__lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
__lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
__lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
__lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
__lowerCAmelCase = image_std if image_std is not None else self.image_std
__lowerCAmelCase = size if size is not None else self.size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , default_to_square=lowerCAmelCase_ )
__lowerCAmelCase = crop_size if crop_size is not None else self.crop_size
__lowerCAmelCase = get_size_dict(lowerCAmelCase_ , param_name='crop_size' )
__lowerCAmelCase = make_list_of_images(lowerCAmelCase_ )
if not valid_images(lowerCAmelCase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__lowerCAmelCase = [to_numpy_array(lowerCAmelCase_ ) for image in images]
if do_resize:
__lowerCAmelCase = [self.resize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_center_crop:
__lowerCAmelCase = [self.center_crop(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_rescale:
__lowerCAmelCase = [self.rescale(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
if do_normalize:
__lowerCAmelCase = [self.normalize(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = [to_channel_dimension_format(lowerCAmelCase_ , lowerCAmelCase_ ) for image in images]
__lowerCAmelCase = {'pixel_values': images}
return BatchFeature(data=lowerCAmelCase_ , tensor_type=lowerCAmelCase_ )
| 53
| 1
|
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/reformer-crime-and-punishment": (
"https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"
)
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/reformer-crime-and-punishment": 524288,
}
class ReformerTokenizer(PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , eos_token="</s>" , unk_token="<unk>" , additional_special_tokens=[] , sp_model_kwargs: Optional[Dict[str, Any]] = None , **kwargs , ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token , unk_token=unk_token , additional_special_tokens=additional_special_tokens , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )
    @property
    def vocab_size( self ):
        return self.sp_model.get_piece_size()
    def get_vocab( self ):
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab
    def __getstate__( self ):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__( self , d ):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , '''sp_model_kwargs''' ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )
    def _tokenize( self , text: str ):
        return self.sp_model.encode(text , out_type=str )
    def _convert_token_to_id( self , token ):
        return self.sp_model.piece_to_id(token )
    def _convert_id_to_token( self , index ):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index )
        return token
    def convert_tokens_to_string( self , tokens ):
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , '''wb''' ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,)
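# Usage sketch (checkpoint name taken from the pretrained map above):
#     tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     ids = tokenizer("Crime and Punishment")["input_ids"]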
| 708
|
'''simple docstring'''
from __future__ import annotations
def all_elements_distinct(collection ) -> bool:
    '''simple docstring'''
    return len(set(collection ) ) == len(collection )
if __name__ == "__main__":
import doctest
doctest.testmod()
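# Example: all_elements_distinct([1, 2, 3]) is True; all_elements_distinct([1, 2, 2]) is False.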
| 245
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
global_rng = random.Random()
def floats_list(shape , scale=1.0 , rng=None , name=None ):
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTester(unittest.TestCase ):
    '''simple docstring'''
    def __init__( self , parent , batch_size=7 , min_seq_length=400 , max_seq_length=2000 , feature_size=24 , num_mel_bins=24 , padding_value=0.0 , sampling_rate=1_6000 , return_attention_mask=True , do_normalize=True , ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
        def _flatten(list_of_lists ):
            return list(itertools.chain(*list_of_lists ) )
        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size) )
                for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
            ]
        if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class SpeechaTextFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    '''simple docstring'''
    feature_extraction_class = SpeechaTextFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        self.feat_extract_tester = SpeechaTextFeatureExtractionTester(self )
    def _check_zero_mean_unit_variance( self , input_vector ):
        self.assertTrue(np.all(np.mean(input_vector , axis=0 ) < 1e-3 ) )
        self.assertTrue(np.all(np.abs(np.var(input_vector , axis=0 ) - 1 ) < 1e-3 ) )
def UpperCamelCase__ ( self: str ):
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCamelCase_ =self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCamelCase_ =[floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
UpperCamelCase_ =[np.asarray(__lowercase ) for speech_input in speech_inputs]
# Test feature size
UpperCamelCase_ =feature_extractor(__lowercase , padding=__lowercase , return_tensors="np" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
UpperCamelCase_ =feature_extractor(speech_inputs[0] , return_tensors="np" ).input_features
UpperCamelCase_ =feature_extractor(np_speech_inputs[0] , return_tensors="np" ).input_features
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# Test batched
UpperCamelCase_ =feature_extractor(__lowercase , return_tensors="np" ).input_features
UpperCamelCase_ =feature_extractor(__lowercase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ):
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCamelCase_ =[floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCamelCase_ =np.asarray(__lowercase )
UpperCamelCase_ =feature_extractor(__lowercase , return_tensors="np" ).input_features
UpperCamelCase_ =feature_extractor(__lowercase , return_tensors="np" ).input_features
for enc_seq_a, enc_seq_a in zip(__lowercase , __lowercase ):
self.assertTrue(np.allclose(__lowercase , __lowercase , atol=1e-3 ) )
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
| 391
|
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")
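    # Flattens nested YAML mappings into dotted keys, e.g. {"a": {"b": 1}}
    # becomes {"a.b": 1}, so every option can be attached to a flat Namespace.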
    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)
    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False
    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
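# The next helper builds (old_key, new_key) pairs that translate the original
# MobileViTV2 checkpoint names into the Hugging Face module naming scheme:
# stem and MobileNet-style blocks first, then the transformer ("global_rep")
# sub-layers, and finally the classification/segmentation heads.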
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k
        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")

        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """Remove keys that have no counterpart in the HF model (e.g. seg_head.aux_head)."""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
            '''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on. '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_mobilevitv2_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 167
| 0
|
'''simple docstring'''
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]
    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)
    def determinant(self) -> int:
if not self.is_square:
return 0
if self.order == (0, 0):
return 1
if self.order == (1, 1):
return int(self.rows[0][0] )
if self.order == (2, 2):
return int(
(self.rows[0][0] * self.rows[1][1])
- (self.rows[0][1] * self.rows[1][0]) )
else:
return sum(
self.rows[0][column] * self.cofactors().rows[0][column]
for column in range(self.num_columns ) )
    def is_invertable(self) -> bool:
        return bool(self.determinant())
    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)
    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self) -> Matrix:
return Matrix(
[
[
self.minors().rows[row][column]
if (row + column) % 2 == 0
else self.minors().rows[row][column] * -1
for column in range(self.minors().num_columns )
]
for row in range(self.minors().num_rows )
] )
    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)
    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
def __repr__( self ) -> str:
'''simple docstring'''
return str(self.rows )
def __str__( self ) -> str:
'''simple docstring'''
if self.num_rows == 0:
return "[]"
if self.num_rows == 1:
return "[[" + ". ".join(str(self.rows[0] ) ) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]
    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows
    def __ne__(self, other: object) -> bool:
        return not self == other
def __neg__( self ) -> Matrix:
'''simple docstring'''
return self * -1
    def __add__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError("""Addition requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __sub__(self, other: Matrix) -> Matrix:
if self.order != other.order:
raise ValueError("""Subtraction requires matrices of the same order""" )
return Matrix(
[
[self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns )]
for i in range(self.num_rows )
] )
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )
    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
if other == 0:
return self.identity()
if other < 0:
if self.is_invertable():
return self.inverse() ** (-other)
raise ValueError(
"""Only invertable matrices can be raised to a negative power""" )
_UpperCamelCase = self
for _ in range(other - 1 ):
result *= self
return result
    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
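    # Minimal usage sketch (illustrative; the values are arbitrary):
    #   m = Matrix([[4, 3], [6, 3]])
    #   m.determinant()        # 4 * 3 - 3 * 6 == -6
    #   m.is_invertable()      # True, since the determinant is non-zero
    #   (m + m).rows           # [[8, 6], [12, 6]]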
| 721
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
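# enable_full_determinism() pins the torch/cuda RNG state and algorithm
# choices so the expected image slices asserted below stay reproducible.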
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )

        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
| 202
| 0
|
from __future__ import annotations
import numpy as np
def lu_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition of a square matrix: returns (lower, upper)
    such that lower @ upper reproduces the input."""
    # Ensure that table is a square array
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)

    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
if __name__ == "__main__":
import doctest
doctest.testmod()
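    # Minimal usage sketch (illustrative values): factor a small matrix and
    # check that the product of the factors reproduces it.
    example = np.array([[2.0, -2.0, 1.0], [0.0, 1.0, 2.0], [5.0, 3.0, 1.0]])
    lower_part, upper_part = lu_decomposition(example)
    assert np.allclose(lower_part @ upper_part, example)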
| 280
|
from itertools import permutations
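# Project Euler 43: a 0-9 pandigital number d1..d10 qualifies when d2d3d4 is
# divisible by 2, d3d4d5 by 3, d4d5d6 by 5, d5d6d7 by 7, d6d7d8 by 11,
# d7d8d9 by 13 and d8d9d10 by 17. The first three checks are unrolled below
# (last-digit, digit-sum, and trailing-5/0 shortcuts); the rest loop.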
def is_substring_divisible(num: tuple) -> bool:
    if num[3] % 2 != 0:
        return False

    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False

    if num[5] % 5 != 0:
        return False

    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 481
| 0
|
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
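# Minimal usage sketch (illustrative): the defaults above mirror the base
# checkpoint, so an export-ready config pair can be built directly.
# `from_model_config` is assumed from the generic OnnxConfig API:
#   config = Data2VecVisionConfig()
#   onnx_config = Data2VecVisionOnnxConfig.from_model_config(config)
#   list(onnx_config.inputs)   # ["pixel_values"]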
| 372
|
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
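    # analyze_directory runs doctest over a directory: `identifier` keeps only
    # matching file names, `n_identifier` excludes matches, `ignore_files`
    # skips names outright, and `only_modules` switches between importing
    # transformers modules and running doctest.testfile on raw files.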
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(".." / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)
    def test_modeling(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_documentation(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
| 372
| 1
|
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])
    @property
    def mean(self):
        """Returns the mean of the affinely transformed distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Returns the variance of the affinely transformed distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Returns the standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(
        self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs
    ):
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]
        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    # class-level attributes set by the concrete subclasses below
    distribution_class: type
    args_dim: Dict[str, int]
    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args) -> Distribution:
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        return ParameterProjection(
            in_features=in_features,
            args_dim=self.args_dim,
            domain_map=LambdaLayer(self.domain_map),
        )

    def domain_map(self, *args: torch.Tensor):
        raise NotImplementedError()
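    # squareplus(x) = (x + sqrt(x^2 + 4)) / 2 maps any real input to a strictly
    # positive value (a smooth, softplus-like transform). The domain_map
    # implementations below use it to keep scale/df/total_count positive.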
    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    def distribution(self, distr_args, loc=None, scale=None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))
| 558
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
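# Assigning `pytestmark` at module scope applies the `integration` marker to
# every test below, so the whole file can be (de)selected with `-m integration`.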
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
| 558
| 1
|
import sys
N = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    """Find the greatest product of thirteen adjacent digits in the string n."""
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f'{solution() = }')
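# The scan is a brute-force sliding window: each of the len(n) - 12 starting
# positions multiplies 13 consecutive digits, so the cost is O(13 * len(n)).
# For example, solution("123456789012345") only has to inspect 3 windows.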
| 704
|
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)

                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences

                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected

                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
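# Minimal usage sketch (illustrative): the function returns a
# (shift, chi_squared_score, decoded_text) triple, e.g.
#   shift, score, decoded = decrypt_caesar_with_chi_squared("vwdqgdug whaw")
# where the shift whose decryption best matches English letter frequencies
# (lowest chi-squared statistic) wins.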
| 5
| 0
|
def solution() -> str:
    """Returns the last ten digits of 1^1 + 2^2 + ... + 1000^1000."""
    total = 0
    for i in range(1, 1001):
        total += i**i
    return str(total)[-10:]
if __name__ == "__main__":
print(solution())
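# Python's arbitrary-precision integers make the direct sum feasible; a
# constant-memory variant keeps only the last ten digits throughout:
#   sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10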
| 30
|
'''simple docstring'''
import argparse
import torch
from transformers import GPT2LMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extracts some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="roberta", choices=["roberta", "gpt2"])
    parser.add_argument("--model_name", default="roberta-large", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_roberta_048131723.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()
    if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = "roberta"
    elif args.model_type == "gpt2":
        model = GPT2LMHeadModel.from_pretrained(args.model_name)
        prefix = "transformer"

    state_dict = model.state_dict()
    compressed_sd = {}

    # Embeddings #
    if args.model_type == "gpt2":
        for param_name in ["wte.weight", "wpe.weight"]:
            compressed_sd[param_name] = state_dict[f"{prefix}.{param_name}"]
    else:
        for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
            param_name = f"{prefix}.embeddings.{w}.weight"
            compressed_sd[param_name] = state_dict[param_name]
        for w in ["weight", "bias"]:
            param_name = f"{prefix}.embeddings.LayerNorm.{w}"
            compressed_sd[param_name] = state_dict[param_name]

    # Transformer Blocks #
    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        if args.model_type == "gpt2":
            for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.h.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.h.{teacher_idx}.{layer}.{w}"
                    ]
            compressed_sd[f"{prefix}.h.{std_idx}.attn.bias"] = state_dict[f"{prefix}.h.{teacher_idx}.attn.bias"]
        else:
            for layer in [
                "attention.self.query",
                "attention.self.key",
                "attention.self.value",
                "attention.output.dense",
                "attention.output.LayerNorm",
                "intermediate.dense",
                "output.dense",
                "output.LayerNorm",
            ]:
                for w in ["weight", "bias"]:
                    compressed_sd[f"{prefix}.encoder.layer.{std_idx}.{layer}.{w}"] = state_dict[
                        f"{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"
                    ]
        std_idx += 1

    # Language Modeling Head ###s
    if args.model_type == "roberta":
        for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
            compressed_sd[f"{layer}"] = state_dict[f"{layer}"]
        if args.vocab_transform:
            for w in ["weight", "bias"]:
                compressed_sd[f"lm_head.dense.{w}"] = state_dict[f"lm_head.dense.{w}"]
                compressed_sd[f"lm_head.layer_norm.{w}"] = state_dict[f"lm_head.layer_norm.{w}"]
    elif args.model_type == "gpt2":
        for w in ["weight", "bias"]:
            compressed_sd[f"{prefix}.ln_f.{w}"] = state_dict[f"{prefix}.ln_f.{w}"]
        compressed_sd["lm_head.weight"] = state_dict["lm_head.weight"]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
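# Example invocation (illustrative; assumes this script is saved as extract.py and
# uses only the flags defined above):
#   python extract.py --model_type roberta --model_name roberta-large \
#       --dump_checkpoint serialization_dir/roberta_student.pth --vocab_transform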
| 683
| 0
|
import math


def prime_sieve(n: int) -> list:
    """Sieve of Eratosthenes: return all primes strictly below n."""
    is_prime = [True] * n
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # mark the multiples of each odd i as composite
    for i in range(3, int(n**0.5 + 1), 2):
        index = i * 2
        while index < n:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, n, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
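# Illustrative usage: prime_sieve(10) -> [2, 3, 5, 7]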
def solution(limit: int = 999_966_663_333) -> int:
    primes_upper_bound = math.floor(math.sqrt(limit)) + 100
    primes = prime_sieve(primes_upper_bound)

    matches_sum = 0
    prime_index = 0
    last_prime = primes[prime_index]

    while (last_prime**2) <= limit:
        next_prime = primes[prime_index + 1]

        lower_bound = last_prime**2
        upper_bound = next_prime**2

        # Get numbers divisible by lps(current)
        current = lower_bound + last_prime
        while upper_bound > current <= limit:
            matches_sum += current
            current += last_prime

        # Reset the upper_bound
        while (upper_bound - next_prime) > limit:
            upper_bound -= next_prime

        # Add the numbers divisible by ups(current)
        current = upper_bound - next_prime
        while current > lower_bound:
            matches_sum += current
            current -= next_prime

        # Remove the numbers divisible by both ups and lps
        current = 0
        while upper_bound > current <= limit:
            if current <= lower_bound:
                # Increment the current number
                current += last_prime * next_prime
                continue
            if current > limit:
                break
            # Remove twice since it was added by both ups and lps
            matches_sum -= current * 2
            # Increment the current number
            current += last_prime * next_prime

        # Setup for next pair
        last_prime = next_prime
        prime_index += 1

    return matches_sum
if __name__ == "__main__":
print(solution())
| 703
|
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive number left, filling with zeros."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number


def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift the binary representation of a positive number right, dropping low bits."""
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number


def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift right while replicating the sign bit (two's complement)."""
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
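# Illustrative examples (checked by hand against the functions above):
#   logical_left_shift(1, 1)       -> '0b10'
#   logical_right_shift(8, 3)      -> '0b1'
#   arithmetic_right_shift(-8, 2)  -> '0b11110'  (i.e. -2 in 5-bit two's complement)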
if __name__ == "__main__":
import doctest
doctest.testmod()
| 203
| 0
|
def upper(word: str) -> str:
    """Convert an ASCII string to upper case without using str.upper()."""
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 304
|
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
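# Illustrative run command, assuming the repository layout implied by the import
# above (hypothetical path):
#   pytest data_structures/hashing/tests/test_hash_map.py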
| 692
| 0
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock

import torch

from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config


# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)

EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]


class ExampleDifferenceTests(unittest.TestCase):
    """
    Checks that every `complete_*` example script contains all of the information
    found in the `by_feature` scripts, line for line.
    """

    def one_complete_example(
        self, complete_file_name: str, parser_only: bool, secondary_filename: str = None, special_strings: list = None
    ):
        self.maxDiff = None
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)


@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class FeatureExamplesTests(TempDirTestCase):
    clean_on_separate_pass = False

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")

        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps epoch
        --output_dir {self.tmpdir}
        """.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --checkpointing_steps 1
        --output_dir {self.tmpdir}
        """.split()
        _ = run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f"""
        examples/by_feature/checkpointing.py
        --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}
        """.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = """
        examples/by_feature/cross_validation.py
        --num_folds 2
        """.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f"""
            examples/by_feature/tracking.py
            --with_tracking
            --project_dir {tmpdir}
            """.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
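# These tests assume they are run from the root of the accelerate repository,
# e.g. (illustrative): python -m pytest tests/test_examples.py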
| 701
|
from collections import deque
from math import floor
from random import random
from time import time


class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    # c is the wanted node count; -1 picks a random count from 10 to 10010
    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    """Undirected counterpart of DirectedGraph: every edge is stored both ways."""

    def __init__(self):
        self.graph = {}

    def add_pair(self, u, v, w=1):
        # check if u exists
        if self.graph.get(u):
            # if there already is an edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is an edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()

        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break

            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss

            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
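# Minimal illustrative usage of the classes above (hypothetical driver, not part
# of the original module):
if __name__ == "__main__":
    g = DirectedGraph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    g.add_pair(2, 0)
    print(g.all_nodes())  # [0, 1, 2]
    print(g.has_cycle())  # True, because 0 -> 1 -> 2 -> 0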
| 314
| 0
|