def gnome_sort(lst: list) -> list:
    """Sort a list in place with gnome sort: walk forward while neighbours
    are in order, swap and step back one position otherwise."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
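
# Quick sanity check (editor's addition, hypothetical helper, not part of the
# original module): reverse-sorted input is gnome sort's worst case, since the
# algorithm steps back one position after every swap.
def _gnome_sort_sanity_check() -> None:
    assert gnome_sort([3, 2, 1]) == [1, 2, 3]
    assert gnome_sort([]) == []
    assert gnome_sort([-2, 5, 0]) == [-2, 0, 5]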
import inspect
import unittest

import numpy as np

from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel

if is_vision_available():
    from transformers import MaskFormerImageProcessor

    from PIL import Image


class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )
        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)
        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, _, _ = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        # assertEqual (not assertTrue) so the two lengths are actually compared
        self.parent.assertEqual(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertEqual(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden states exist
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))


@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_output_hidden_state(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)


TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
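
# Editor's sketch (not part of the test file above): how the checkpoint these
# integration tests exercise is typically consumed outside the test suite. The
# post-processing call mirrors the public MaskFormerImageProcessor API; treat
# the exact argument names as assumptions.
def _semantic_segmentation_demo(image_path: str):
    from PIL import Image
    from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

    processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
    model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
    image = Image.open(image_path)
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # one (height, width) tensor of per-pixel class ids per input image
    return processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]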
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''sayakpaul/vit-msn-base''': '''https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json''',
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class _lowerCAmelCase ( __snake_case ):
__lowerCAmelCase : Tuple = '''vit_msn'''
def __init__( self : Any , a : Optional[int]=768 , a : Optional[Any]=12 , a : Optional[int]=12 , a : List[str]=3072 , a : str="gelu" , a : List[Any]=0.0 , a : Union[str, Any]=0.0 , a : List[Any]=0.02 , a : int=1E-06 , a : Any=224 , a : Optional[Any]=16 , a : List[str]=3 , a : Any=True , **a : Optional[int] , ) -> int:
"""simple docstring"""
super().__init__(**a )
lowercase = hidden_size
lowercase = num_hidden_layers
lowercase = num_attention_heads
lowercase = intermediate_size
lowercase = hidden_act
lowercase = hidden_dropout_prob
lowercase = attention_probs_dropout_prob
lowercase = initializer_range
lowercase = layer_norm_eps
lowercase = image_size
lowercase = patch_size
lowercase = num_channels
lowercase = qkv_bias
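
# Editor's sketch (hypothetical usage, not part of the module): the config is
# used like any PretrainedConfig subclass; ViTMSNModel is the matching encoder
# class in transformers.
def _vit_msn_config_demo():
    from transformers import ViTMSNConfig, ViTMSNModel

    config = ViTMSNConfig(image_size=384, patch_size=32)  # override two defaults
    model = ViTMSNModel(config)  # randomly initialised encoder
    return model.config.hidden_size  # 768, from the default above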
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def A_ ( __UpperCamelCase : str , __UpperCamelCase : dict ):
lowercase = BeautifulSoup(requests.get(__UpperCamelCase , params=__UpperCamelCase ).content , '''html.parser''' )
lowercase = soup.find('''div''' , attrs={'''class''': '''gs_ri'''} )
lowercase = div.find('''div''' , attrs={'''class''': '''gs_fl'''} ).find_all('''a''' )
return anchors[2].get_text()
if __name__ == "__main__":
__lowerCAmelCase = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 30,
'''pages''': '''3979-3990''',
'''year''': 2_018,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
def depth_first_search(grid: list, row: int, col: int, visit: set) -> int:
    """Count the paths from (row, col) to the bottom-right cell of a 0/1 grid,
    moving in the four cardinal directions, avoiding cells equal to 1, and
    never revisiting a cell within a path."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
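
# Worked example (editor's addition, hypothetical helper): with only the
# centre cell blocked, the two corridors around it are the only routes from
# (0, 0) to (2, 2), so the search returns 2.
def _count_paths_demo() -> int:
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    return depth_first_search(example_grid, 0, 0, set())  # -> 2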
def catalan(number: int) -> int:
    """Return the ``number``-th (1-indexed) Catalan number."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
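
# Sanity check (editor's addition, hypothetical helper): the loop applies the
# recurrence C(i) = C(i - 1) * (4 * i - 2) / (i + 1), so the 1-indexed outputs
# are 1, 1, 2, 5, 14, 42, ...
def _catalan_demo() -> list:
    return [catalan(n) for n in range(1, 7)]  # [1, 1, 2, 5, 14, 42]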
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    """Return the vector from ``end_point1`` to ``end_point2`` as (x, y, z)."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    """Return the cross product of two 3D vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """Check whether a vector is the zero vector after rounding each component."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff the cross product of AB and AC is the
    zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
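
# Usage example (editor's addition, hypothetical helper): points on the line
# y = x, z = 0 give a zero cross product; bending the third point off the
# line does not.
def _collinear_demo() -> tuple:
    on_line = are_collinear((0, 0, 0), (1, 1, 0), (2, 2, 0))  # True
    off_line = are_collinear((0, 0, 0), (1, 1, 0), (2, 3, 0))  # False
    return on_line, off_line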
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_xlm_roberta_xl": [
        "XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaXLConfig",
        "XLMRobertaXLOnnxConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta_xl"] = [
        "XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaXLForCausalLM",
        "XLMRobertaXLForMaskedLM",
        "XLMRobertaXLForMultipleChoice",
        "XLMRobertaXLForQuestionAnswering",
        "XLMRobertaXLForSequenceClassification",
        "XLMRobertaXLForTokenClassification",
        "XLMRobertaXLModel",
        "XLMRobertaXLPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xlm_roberta_xl import (
        XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaXLConfig,
        XLMRobertaXLOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta_xl import (
            XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaXLForCausalLM,
            XLMRobertaXLForMaskedLM,
            XLMRobertaXLForMultipleChoice,
            XLMRobertaXLForQuestionAnswering,
            XLMRobertaXLForSequenceClassification,
            XLMRobertaXLForTokenClassification,
            XLMRobertaXLModel,
            XLMRobertaXLPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowercase__ :str = logging.get_logger(__name__)
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
def A_ ( self : Any , __lowercase : Tuple ):
'''simple docstring'''
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Optional[int] = [label.strip() for label in labels.split(''',''' ) if label.strip()]
return labels
def __call__( self : List[Any] , __lowercase : Union[str, Any] , __lowercase : Optional[int] , __lowercase : Optional[int] ):
'''simple docstring'''
if len(__lowercase ) == 0 or len(__lowercase ) == 0:
raise ValueError('''You must include at least one label and at least one sequence.''' )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
'''The provided hypothesis_template "{}" was not able to be formatted with the target labels. '''
'''Make sure the passed template includes formatting syntax such as {{}} where the label should go.'''
).format(__lowercase ) )
if isinstance(__lowercase , __lowercase ):
__UpperCAmelCase : Optional[Any] = [sequences]
__UpperCAmelCase : Any = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(__lowercase )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(__UpperCAmelCase )
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self : Any , __lowercase : Any=ZeroShotClassificationArgumentHandler() , *__lowercase : List[str] , **__lowercase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = args_parser
super().__init__(*__lowercase , **__lowercase )
if self.entailment_id == -1:
logger.warning(
'''Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '''
'''-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.''' )
@property
def A_ ( self : Tuple ):
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith('''entail''' ):
return ind
return -1
def A_ ( self : Optional[Any] , __lowercase : Tuple , __lowercase : Tuple=True , __lowercase : int=True , __lowercase : Optional[int]=TruncationStrategy.ONLY_FIRST , **__lowercase : Optional[int] ):
'''simple docstring'''
__UpperCAmelCase : Tuple = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
'''Tokenizer was not supporting padding necessary for zero-shot, attempting to use '''
''' `pad_token=eos_token`''' )
__UpperCAmelCase : Dict = self.tokenizer.eos_token
try:
__UpperCAmelCase : str = self.tokenizer(
__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , padding=__lowercase , truncation=__lowercase , )
except Exception as e:
if "too short" in str(__lowercase ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
__UpperCAmelCase : Dict = self.tokenizer(
__lowercase , add_special_tokens=__lowercase , return_tensors=__lowercase , padding=__lowercase , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def A_ ( self : str , **__lowercase : Dict ):
'''simple docstring'''
if kwargs.get('''multi_class''' , __lowercase ) is not None:
__UpperCAmelCase : Optional[int] = kwargs['''multi_class''']
logger.warning(
'''The `multi_class` argument has been deprecated and renamed to `multi_label`. '''
'''`multi_class` will be removed in a future version of Transformers.''' )
__UpperCAmelCase : Tuple = {}
if "candidate_labels" in kwargs:
__UpperCAmelCase : Dict = self._args_parser._parse_labels(kwargs['''candidate_labels'''] )
if "hypothesis_template" in kwargs:
__UpperCAmelCase : str = kwargs['''hypothesis_template''']
__UpperCAmelCase : Any = {}
if "multi_label" in kwargs:
__UpperCAmelCase : Dict = kwargs['''multi_label''']
return preprocess_params, {}, postprocess_params
def __call__( self : Any , __lowercase : Union[str, List[str]] , *__lowercase : List[str] , **__lowercase : Optional[Any] , ):
'''simple docstring'''
if len(__lowercase ) == 0:
pass
elif len(__lowercase ) == 1 and "candidate_labels" not in kwargs:
__UpperCAmelCase : List[Any] = args[0]
else:
raise ValueError(f'''Unable to understand extra arguments {args}''' )
return super().__call__(__lowercase , **__lowercase )
def A_ ( self : Tuple , __lowercase : Optional[Any] , __lowercase : List[Any]=None , __lowercase : Union[str, Any]="This example is {}." ):
'''simple docstring'''
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self._args_parser(__lowercase , __lowercase , __lowercase )
for i, (candidate_label, sequence_pair) in enumerate(zip(__lowercase , __lowercase ) ):
__UpperCAmelCase : List[Any] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(__lowercase ) - 1,
**model_input,
}
def A_ ( self : str , __lowercase : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Optional[int] = inputs['''candidate_label''']
__UpperCAmelCase : Tuple = inputs['''sequence''']
__UpperCAmelCase : List[Any] = {k: inputs[k] for k in self.tokenizer.model_input_names}
__UpperCAmelCase : List[Any] = self.model(**__lowercase )
__UpperCAmelCase : List[str] = {
'''candidate_label''': candidate_label,
'''sequence''': sequence,
'''is_last''': inputs['''is_last'''],
**outputs,
}
return model_outputs
def A_ ( self : Tuple , __lowercase : Tuple , __lowercase : List[Any]=False ):
'''simple docstring'''
__UpperCAmelCase : List[str] = [outputs['''candidate_label'''] for outputs in model_outputs]
__UpperCAmelCase : Optional[Any] = [outputs['''sequence'''] for outputs in model_outputs]
__UpperCAmelCase : Union[str, Any] = np.concatenate([output['''logits'''].numpy() for output in model_outputs] )
__UpperCAmelCase : Dict = logits.shape[0]
__UpperCAmelCase : Union[str, Any] = len(__lowercase )
__UpperCAmelCase : Optional[int] = N // n
__UpperCAmelCase : Tuple = logits.reshape((num_sequences, n, -1) )
if multi_label or len(__lowercase ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
__UpperCAmelCase : int = self.entailment_id
__UpperCAmelCase : List[str] = -1 if entailment_id == 0 else 0
__UpperCAmelCase : List[str] = reshaped_outputs[..., [contradiction_id, entailment_id]]
__UpperCAmelCase : int = np.exp(__lowercase ) / np.exp(__lowercase ).sum(-1 , keepdims=__lowercase )
__UpperCAmelCase : Dict = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
__UpperCAmelCase : Tuple = reshaped_outputs[..., self.entailment_id]
__UpperCAmelCase : Optional[int] = np.exp(__lowercase ) / np.exp(__lowercase ).sum(-1 , keepdims=__lowercase )
__UpperCAmelCase : List[Any] = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
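
# Editor's sketch (not part of the module): the class above is normally
# reached through the `pipeline` factory. `facebook/bart-large-mnli` is one
# commonly used NLI checkpoint, named here as an assumption.
def _zero_shot_usage_demo():
    from transformers import pipeline

    classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
    result = classifier(
        "The team shipped the new release last night.",
        candidate_labels=["software", "sports", "cooking"],
    )
    # labels come back sorted by score, highest first
    return result["labels"][0], result["scores"][0]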
"""simple docstring"""
from __future__ import annotations
lowercase__ :Dict = 'Muhammad Umer Farooq'
lowercase__ :Any = 'MIT'
lowercase__ :List[str] = '1.0.0'
lowercase__ :str = 'Muhammad Umer Farooq'
lowercase__ :List[str] = 'contact@muhammadumerfarooq.me'
lowercase__ :Dict = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class snake_case ( __UpperCAmelCase ):
'''simple docstring'''
def __init__( self : str , __lowercase : str ):
'''simple docstring'''
super().__init__()
__UpperCAmelCase : list[str] = []
__UpperCAmelCase : Tuple = domain
def A_ ( self : Any , __lowercase : str , __lowercase : list[tuple[str, str | None]] ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__UpperCAmelCase : List[Any] = parse.urljoin(self.domain , __lowercase )
self.urls.append(__lowercase )
def lowerCamelCase_ ( UpperCAmelCase_ ) ->str:
"""simple docstring"""
return ".".join(get_sub_domain_name(UpperCAmelCase_ ).split('''.''' )[-2:] )
def lowerCamelCase_ ( UpperCAmelCase_ ) ->str:
"""simple docstring"""
return parse.urlparse(UpperCAmelCase_ ).netloc
def lowerCamelCase_ ( UpperCAmelCase_ = "https://github.com" ) ->list[str]:
"""simple docstring"""
__UpperCAmelCase : Union[str, Any] = get_domain_name(UpperCAmelCase_ )
# Initialize the parser
__UpperCAmelCase : int = Parser(UpperCAmelCase_ )
try:
# Open URL
__UpperCAmelCase : Union[str, Any] = requests.get(UpperCAmelCase_ )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__UpperCAmelCase : str = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__UpperCAmelCase : Optional[int] = requests.get(UpperCAmelCase_ )
# Get the valid email.
__UpperCAmelCase : Tuple = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(UpperCAmelCase_ )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ :List[str] = emails_from_url('https://github.com')
print(f"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails)))
def selection_sort(collection: list) -> list:
    """In-place selection sort: repeatedly select the minimum of the unsorted
    suffix and swap it into position."""
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(selection_sort(unsorted))
import unittest

from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel


class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"

        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]

        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
"""simple docstring"""
def a ( __UpperCAmelCase : list ) -> Union[str, Any]:
if any(not isinstance(__UpperCAmelCase , __UpperCAmelCase ) or x < 0 for x in sequence ):
raise TypeError("""Sequence must be list of non-negative integers""" )
for _ in range(len(__UpperCAmelCase ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(__UpperCAmelCase , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
import logging
import re

import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor


logger = logging.getLogger(__name__)

name_width = 50  # max width of layer names
qname_width = 70  # max width of quantizer names


def add_arguments(parser):
    """Add quant_trainer arguments to an argparse parser."""
    group = parser.add_argument_group("quant_trainer arguments")
    group.add_argument("--wprec", type=int, default=8, help="weight precision")
    group.add_argument("--aprec", type=int, default=8, help="activation precision")
    group.add_argument("--quant-per-tensor", action="store_true", help="per tensor weight scaling")
    group.add_argument("--quant-disable", action="store_true", help="disable all quantizers")
    group.add_argument("--quant-disable-embeddings", action="store_true", help="disable all embeddings quantizers")
    group.add_argument("--quant-disable-keyword", type=str, nargs="+", help="disable quantizers by keyword")
    group.add_argument("--quant-disable-layer-module", type=str, help="disable quantizers by keyword under layer.")
    group.add_argument("--quant-enable-layer-module", type=str, help="enable quantizers by keyword under layer")
    group.add_argument("--calibrator", default="max", help="which quantization range calibrator to use")
    group.add_argument("--percentile", default=None, type=float, help="percentile for PercentileCalibrator")
    group.add_argument("--fuse-qkv", action="store_true", help="use the same scale factor for qkv")
    group.add_argument("--clip-gelu", metavar="N", type=float, help="clip gelu output maximum value to N")
    group.add_argument(
        "--recalibrate-weights",
        action="store_true",
        help=(
            "recalibrate weight amaxes by taking the max of the weights."
            " amaxes will be computed with the current quantization granularity (axis)."
        ),
    )


def set_default_quantizers(args):
    """Set default quantizer descriptors; must run before the model is created."""
    if args.calibrator == "max":
        calib_method = "max"
    elif args.calibrator == "percentile":
        if args.percentile is None:
            raise ValueError("Specify --percentile when using percentile calibrator")
        calib_method = "histogram"
    elif args.calibrator == "mse":
        calib_method = "histogram"
    else:
        raise ValueError(f"Invalid calibrator {args.calibrator}")

    input_desc = QuantDescriptor(num_bits=args.aprec, calib_method=calib_method)
    weight_desc = QuantDescriptor(num_bits=args.wprec, axis=(None if args.quant_per_tensor else (0,)))
    quant_nn.QuantLinear.set_default_quant_desc_input(input_desc)
    quant_nn.QuantLinear.set_default_quant_desc_weight(weight_desc)


def configure_model(model, args, calib=False, eval=False):
    """Configure the model's quantizers according to the parsed arguments."""
    logger.info("Configuring Model for Quantization")
    logger.info(f"using quantization package {pytorch_quantization.__file__}")

    if not calib:
        if args.quant_disable_embeddings:
            set_quantizer_by_name(model, ["embeddings"], which="weight", _disabled=True)

        if args.quant_disable:
            set_quantizer_by_name(model, [""], _disabled=True)

        if args.quant_disable_keyword:
            set_quantizer_by_name(model, args.quant_disable_keyword, _disabled=True)

        if args.quant_disable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_disable_layer_module], _disabled=True)

        if args.quant_enable_layer_module:
            set_quantizer_by_name(model, [r"layer.\d+." + args.quant_enable_layer_module], _disabled=False)

        if args.recalibrate_weights:
            recalibrate_weights(model)

        if args.fuse_qkv:
            fuse_qkv(model, args)

        if args.clip_gelu:
            clip_gelu(model, args.clip_gelu)

        # if args.local_rank in [-1, 0] and not calib:
        print_quant_summary(model)


def enable_calibration(model):
    """Enable calibration of all *_quantizer modules in the model."""
    logger.info("Enabling Calibration")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                module.disable_quant()
                module.enable_calib()
            else:
                module.disable()
            logger.info(f"{name:80}: {module}")


def finish_calibration(model, args):
    """Disable calibration and load calibrated amax for all *_quantizer modules."""
    logger.info("Loading calibrated amax")
    for name, module in model.named_modules():
        if name.endswith("_quantizer"):
            if module._calibrator is not None:
                if isinstance(module._calibrator, calib.MaxCalibrator):
                    module.load_calib_amax()
                else:
                    module.load_calib_amax("percentile", percentile=args.percentile)
                module.enable_quant()
                module.disable_calib()
            else:
                module.enable()
    model.cuda()
    print_quant_summary(model)


def fuse_qkv(model, args):
    """Force the Q, K and V quantizers of each self-attention block to share a
    scale factor by taking the max of the three amaxes."""

    def fuse3(qq, qk, qv):
        for mod in [qq, qk, qv]:
            if not hasattr(mod, "_amax"):
                print("          WARNING: NO AMAX BUFFER")
                return
        q = qq._amax.detach().item()
        k = qk._amax.detach().item()
        v = qv._amax.detach().item()

        amax = max(q, k, v)
        qq._amax.fill_(amax)
        qk._amax.fill_(amax)
        qv._amax.fill_(amax)
        logger.info(f"          q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}")

    for name, mod in model.named_modules():
        if name.endswith(".attention.self"):
            logger.info(f"FUSE_QKV: {name:{name_width}}")
            fuse3(mod.matmul_q_input_quantizer, mod.matmul_k_input_quantizer, mod.matmul_v_input_quantizer)
            if args.quant_per_tensor:
                fuse3(mod.query._weight_quantizer, mod.key._weight_quantizer, mod.value._weight_quantizer)


def clip_gelu(model, maxval):
    """Clamp the amax of the input quantizer following each GELU to maxval."""
    for name, mod in model.named_modules():
        if name.endswith(".output.dense") and not name.endswith("attention.output.dense"):
            amax_init = mod._input_quantizer._amax.data.detach().item()
            mod._input_quantizer._amax.data.detach().clamp_(max=maxval)
            amax = mod._input_quantizer._amax.data.detach().item()
            logger.info(f"CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}")


def expand_amax(model):
    """Expand per-tensor amax to per-channel, assigning the per-tensor value to
    every channel."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer") and mod._weight_quantizer.axis is not None:
            k = mod.weight.shape[0]
            amax = mod._weight_quantizer._amax.detach()
            mod._weight_quantizer._amax = torch.ones(k, dtype=amax.dtype, device=amax.device) * amax
            print(f"expanding {name} {amax} -> {mod._weight_quantizer._amax}")


def recalibrate_weights(model):
    """Perform max calibration on the weights and update amax."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_weight_quantizer"):
            if not hasattr(mod._weight_quantizer, "_amax"):
                print(f"RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER")
                continue

            # determine which axes to reduce across
            # e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
            axis_set = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis)
            reduce_axis = set(range(len(mod.weight.size()))) - axis_set
            amax = pytorch_quantization.utils.reduce_amax(mod.weight, axis=reduce_axis, keepdims=True).detach()
            logger.info(f"RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}")
            mod._weight_quantizer._amax = amax


def print_model_summary(model, name_width=25, line_width=180, ignore=None):
    """Print the quantization configuration of every layer with a weight."""
    if ignore is None:
        ignore = []
    elif not isinstance(ignore, list):
        ignore = [ignore]

    name_width = 0
    for name, mod in model.named_modules():
        if not hasattr(mod, "weight"):
            continue
        name_width = max(name_width, len(name))

    for name, mod in model.named_modules():
        input_q = getattr(mod, "_input_quantizer", None)
        weight_q = getattr(mod, "_weight_quantizer", None)
        if not hasattr(mod, "weight"):
            continue
        if type(mod) in ignore:
            continue
        if [True for s in ignore if type(s) is str and s in name]:
            continue
        act_str = f"Act:{input_q.extra_repr()}"
        wgt_str = f"Wgt:{weight_q.extra_repr()}"
        s = f"{name:{name_width}} {act_str} {wgt_str}"
        if len(s) <= line_width:
            logger.info(s)
        else:
            logger.info(f"{name:{name_width}} {act_str}")
            logger.info(f'{" ":{name_width}} {wgt_str}')


def print_quant_summary(model):
    """Print a summary of all TensorQuantizer modules in the model."""
    count = 0
    for name, mod in model.named_modules():
        if isinstance(mod, pytorch_quantization.nn.TensorQuantizer):
            print(f"{name:80} {mod}")
            count += 1
    print(f"{count} TensorQuantizers found in model")


def set_quantizer(name, mod, quantizer, k, v):
    """Set attribute k of mod's quantizer submodule to v."""
    quantizer_mod = getattr(mod, quantizer, None)
    if quantizer_mod is not None:
        assert hasattr(quantizer_mod, k)
        setattr(quantizer_mod, k, v)
    else:
        logger.warning(f"{name} has no {quantizer}")


def set_quantizers(name, mod, which="both", **kwargs):
    """Set quantizer attributes for mod's input and/or weight quantizers."""
    s = f"Warning: changing {which} quantizers of {name:{qname_width}}"
    for k, v in kwargs.items():
        s += f" {k}={v}"
        if which in ["input", "both"]:
            set_quantizer(name, mod, "_input_quantizer", k, v)
        if which in ["weight", "both"]:
            set_quantizer(name, mod, "_weight_quantizer", k, v)
    logger.info(s)


def set_quantizer_by_name(model, names, **kwargs):
    """Set quantizer attributes for layers whose name matches a regex in names."""
    for name, mod in model.named_modules():
        if hasattr(mod, "_input_quantizer") or hasattr(mod, "_weight_quantizer"):
            for n in names:
                if re.search(n, name):
                    set_quantizers(name, mod, **kwargs)
        elif name.endswith("_quantizer"):
            for n in names:
                if re.search(n, name):
                    s = f"Warning: changing {name:{name_width}}"
                    for k, v in kwargs.items():
                        s += f" {k}={v}"
                        setattr(mod, k, v)
                    logger.info(s)
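
# Editor's sketch (hypothetical wiring, not part of the module): the intended
# call order is add_arguments -> parse -> set_default_quantizers (before the
# model is built) -> configure_model. `build_model` is a stand-in factory.
def _quant_trainer_wiring_demo(build_model):
    import argparse

    parser = argparse.ArgumentParser()
    add_arguments(parser)
    args = parser.parse_args(["--wprec", "8", "--aprec", "8", "--quant-per-tensor"])
    set_default_quantizers(args)  # must run before quantized layers are created
    model = build_model()
    configure_model(model, args)
    return model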
"""
Pure Python implementations of a fixed priority queue and an element priority queue
using Python lists.
"""


class OverFlowError(Exception):
    pass


class UnderFlowError(Exception):
    pass


class FixedPriorityQueue:
    """Queue with three fixed priority levels; priority 0 is served first."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self) -> str:
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue in which the smallest element has the highest priority."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        data = min(self.queue)
        self.queue.remove(data)
        return data

    def __str__(self) -> str:
        return str(self.queue)
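# Design note (illustrative addition, not from the original file): both classes above
# cost O(n) per dequeue because they scan or search a plain list. The same
# element-priority behaviour costs O(log n) per operation with the standard-library heap:
def _demo_heapq_priority_queue():  # hypothetical helper; call manually to try it
    import heapq

    heap = []
    for item in (10, 70, 100, 1, 5):
        heapq.heappush(heap, item)  # O(log n) insert
    return [heapq.heappop(heap) for _ in range(len(heap))]  # pops in ascending order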
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
    fpq.enqueue(0, 10)
    fpq.enqueue(1, 70)
    fpq.enqueue(0, 100)
    fpq.enqueue(2, 1)
    fpq.enqueue(2, 5)
    fpq.enqueue(1, 7)
    fpq.enqueue(2, 4)
    fpq.enqueue(1, 64)
    fpq.enqueue(0, 128)
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq)
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())
    print(fpq.dequeue())


def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
    epq.enqueue(10)
    epq.enqueue(70)
    epq.enqueue(100)
    epq.enqueue(1)
    epq.enqueue(5)
    epq.enqueue(7)
    epq.enqueue(4)
    epq.enqueue(64)
    epq.enqueue(128)
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq)
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())
    print(epq.dequeue())


if __name__ == "__main__":
    fixed_priority_queue()
    element_priority_queue()
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    """Record the learning rate at each of `num_steps` scheduler steps."""
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    """Same as `unwrap_schedule`, but save and reload the scheduler state halfway through."""
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
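# Illustrative usage sketch (not part of the original tests): collecting the LR curve
# of a warmup-then-linear-decay schedule with the helper above. Uses only names
# already imported at the top of this file.
def _demo_unwrap_linear_schedule():  # hypothetical helper; call manually to try it
    model = nn.Linear(2, 2)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    return unwrap_schedule(scheduler, num_steps=10)  # e.g. [0.0, 5.0, 10.0, 8.75, ...]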
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            # w converges towards target as the MSE loss is minimized
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
            get_constant_schedule: ({}, [10.0] * self.num_steps),
            get_constant_schedule_with_warmup: (
                {"num_warmup_steps": 4},
                [0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
            ),
            get_linear_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
            ),
            get_cosine_schedule_with_warmup: (
                {**common_kwargs},
                [0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
            ),
            get_cosine_with_hard_restarts_schedule_with_warmup: (
                {**common_kwargs, "num_cycles": 2},
                [0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
            ),
            get_polynomial_decay_schedule_with_warmup: (
                {**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
            ),
            get_inverse_sqrt_schedule: (
                {"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
            ),
        }

        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")


class LambdaScheduleWrapper:
    """Wraps an lr-schedule lambda in a callable object so LambdaLR's state_dict can serialize it."""

    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
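# Design note (illustrative addition): torch's LambdaLR.state_dict() skips plain
# functions/lambdas but serializes the __dict__ of callable *objects*. Wrapping each
# lr lambda in LambdaScheduleWrapper therefore lets the schedule's state round-trip
# through torch.save/torch.load, which the save-and-reload test above exercises.
def _demo_wrapped_state_dict():  # hypothetical helper; call manually to try it
    model = nn.Linear(2, 2)
    optimizer = AdamW(model.parameters(), lr=1.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=1, num_training_steps=5)
    LambdaScheduleWrapper.wrap_scheduler(scheduler)
    return scheduler.state_dict()["lr_lambdas"]  # now a list of dicts instead of None entries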
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """
    Find all the valid positions a knight can move to from the current position.

    >>> get_valid_pos((1, 3), 4)
    [(2, 1), (0, 1), (3, 2)]
    """
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []

    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)

    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """
    Check if the board (matrix) has been completely filled with non-zero values.

    >>> is_complete([[1]])
    True
    >>> is_complete([[1, 2], [3, 0]])
    False
    """
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Helper function to solve the knight tour problem by backtracking."""
    if is_complete(board):
        return True

    for position in get_valid_pos(pos, len(board)):
        y, x = position

        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0

    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """
    Find a solution for the knight tour problem on a board of size n.

    >>> open_knight_tour(1)
    [[1]]
    >>> open_knight_tour(2)
    Traceback (most recent call last):
        ...
    ValueError: Open Knight Tour cannot be performed on a board of size 2
    """
    board = [[0 for i in range(n)] for j in range(n)]

    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0

    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
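# Illustrative usage sketch (not part of the original module): 5x5 is the smallest
# square board that admits an open knight's tour, so this prints a matrix whose
# entries 1..25 trace the knight's path.
def _demo_open_knight_tour():  # hypothetical helper; call manually to try it
    for row in open_knight_tour(5):
        print(row)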
if __name__ == "__main__":
import doctest
doctest.testmod()
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
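# Illustrative usage sketch (not part of the original module): composing an
# encoder-decoder config from two independently created sub-configs. The checkpoint
# name is a placeholder.
def _demo_compose_config():  # hypothetical helper; call manually to try it
    from transformers import AutoConfig

    encoder = AutoConfig.from_pretrained("bert-base-uncased")  # any encoder checkpoint
    decoder = AutoConfig.from_pretrained("bert-base-uncased")  # reused here as the decoder
    config = EncoderDecoderConfig.from_encoder_decoder_configs(encoder, decoder)
    return config.to_dict()  # nested dict with "encoder" and "decoder" sub-configs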
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    """Checks that configs, models, tokenizers and pipelines keep working with TRANSFORMERS_OFFLINE=1."""

    @require_torch
    def test_offline_mode(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n'

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n'
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n'
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n'

        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n'
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n'

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
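# Illustrative sketch (not part of the original tests): the core pattern used above in
# a standalone form - run a child interpreter with socket access monkey-patched away
# and TRANSFORMERS_OFFLINE=1 in its environment, then inspect what it printed.
def _demo_offline_subprocess():  # hypothetical helper; call manually to try it
    import os

    env = dict(os.environ, TRANSFORMERS_OFFLINE='1')
    code = 'import socket\ndef no_net(*a, **k): raise RuntimeError("offline")\nsocket.socket = no_net\nprint("ok")'
    result = subprocess.run([sys.executable, '-c', code], env=env, capture_output=True, check=False)
    return result.stdout.decode()  # "ok" - nothing in the child touched the network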
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """
    Two strings are anagrams if they are made up of the same letters but are
    arranged differently (ignoring the case).

    >>> check_anagrams('Silent', 'Listen')
    True
    >>> check_anagrams('This is a string', 'Is this a string')
    True
    >>> check_anagrams('There', 'Their')
    False
    """
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()

    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')

    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False

    # Default values for count should be 0
    count = defaultdict(int)

    # For each character in the input strings,
    # increment its count for the first string and
    # decrement it for the second string
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1

    return all(_count == 0 for _count in count.values())
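# Design note (illustrative addition): the same check can be expressed with
# collections.Counter, which builds both frequency tables in C code:
def check_anagrams_counter(first_str: str, second_str: str) -> bool:
    from collections import Counter

    def normalize(s: str) -> str:
        return s.lower().replace(' ', '')

    return Counter(normalize(first_str)) == Counter(normalize(second_str))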
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input('Enter the first string ').strip()
    input_b = input('Enter the second string ').strip()

    status = check_anagrams(input_a, input_b)
    print(f"{input_a} and {input_b} are {'' if status else 'not '}anagrams.")
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError("At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training")
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """
    Factory function used to instantiate the training command from provided command line arguments.

    Returns: TrainCommand
    """
    return TrainCommand(args)


class TrainCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers CLI.

        Args:
            parser: Root parser to register command-specific arguments.
        """
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')

        train_parser.add_argument(
            '--train_data',
            type=str,
            required=True,
            help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.',
        )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.'
        )
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.'
        )
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.'
        )
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).'
        )
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument(
            '--validation_split',
            type=float,
            default=0.1,
            help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.',
        )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.'
        )
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help="Model's name or path to stored model."
        )
        train_parser.add_argument('--train_batch_size', type=int, default=32, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=64, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)

    def __init__(self, args: Namespace):
        self.logger = logging.get_logger('transformers-cli/training')

        self.framework = 'tf' if is_tf_available() else 'torch'

        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f'Loading {args.task} pipeline for {args.model}')
        if args.task == 'text_classification':
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == 'token_classification':
            raise NotImplementedError
        elif args.task == 'question_answering':
            raise NotImplementedError

        self.logger.info(f'Loading dataset from {args.train_data}')
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f'Loading validation dataset from {args.validation_data}')
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )

        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon

    def run(self):
        if self.framework == 'tf':
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
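# Illustrative usage sketch (not part of the original module): how this command is
# typically wired into a root ArgumentParser; the csv path is a placeholder.
def _demo_register_train_command():  # hypothetical helper; call manually to try it
    parser = ArgumentParser('transformers-cli', description='demo')
    subparsers = parser.add_subparsers(help='commands')
    TrainCommand.register_subcommand(subparsers)
    args = parser.parse_args(['train', '--train_data', 'train.csv'])  # placeholder path
    return args.func(args)  # instantiates TrainCommand via train_command_factory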
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {'shortest_edge': 20}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetVaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetVaImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetVaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_center_crop'))
        self.assertTrue(hasattr(image_processing, 'crop_size'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 20})
        self.assertEqual(image_processor.crop_size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {'shortest_edge': 42})
        self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size['height'],
                self.image_processor_tester.crop_size['width'],
            ),
        )
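# Illustrative usage sketch (not part of the original tests): with the dict above the
# processor resizes the shortest edge to 20 and center-crops to 18x18, so one PIL
# image becomes a (1, 3, 18, 18) tensor.
def _demo_processor_shapes():  # hypothetical helper; call manually to try it
    image = Image.new('RGB', (60, 30))
    processor = MobileNetVaImageProcessor(
        do_resize=True, size={'shortest_edge': 20}, do_center_crop=True, crop_size={'height': 18, 'width': 18}
    )
    return processor(image, return_tensors='pt').pixel_values.shape  # torch.Size([1, 3, 18, 18])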
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
MODEL_CLASSES = {
'bart': (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'bert': (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'bert-base-cased-finetuned-mrpc': (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'dpr': (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'gpt2': (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlnet': (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm': (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'xlm-roberta': (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'transfo-xl': (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'openai-gpt': (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'roberta': (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'layoutlm': (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
'roberta-large-mnli': (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'camembert': (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'flaubert': (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert': (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'distilbert-base-distilled-squad': (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert': (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'lxmert-visual-feature-encoder': (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'ctrl': (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'albert': (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
't5': (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'electra': (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
'wav2vec2': (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def convert_pt_checkpoint_to_tf(
    model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True
):
    if model_type not in MODEL_CLASSES:
        raise ValueError(f'Unrecognized model type, should be one of {list(MODEL_CLASSES.keys())}.')

    config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type]

    # Initialise TF model
    if config_file in aws_config_map:
        config_file = cached_file(config_file, CONFIG_NAME, force_download=not use_cached_models)
    config = config_class.from_json_file(config_file)
    config.output_hidden_states = True
    config.output_attentions = True
    print(f'Building TensorFlow model from configuration: {config}')
    tf_model = model_class(config)

    # Load weights from tf checkpoint
    if pytorch_checkpoint_path in aws_config_map.keys():
        pytorch_checkpoint_path = cached_file(
            pytorch_checkpoint_path, WEIGHTS_NAME, force_download=not use_cached_models
        )
    # Load PyTorch checkpoint in tf2 model:
    tf_model = load_pytorch_checkpoint_in_tfa_model(tf_model, pytorch_checkpoint_path)

    if compare_with_pt_model:
        tfo = tf_model(tf_model.dummy_inputs, training=False)  # build the network

        state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu")
        pt_model = pt_model_class.from_pretrained(
            pretrained_model_name_or_path=None, config=config, state_dict=state_dict
        )

        with torch.no_grad():
            pto = pt_model(**pt_model.dummy_inputs)

        np_pt = pto[0].numpy()
        np_tf = tfo[0].numpy()
        diff = np.amax(np.abs(np_pt - np_tf))
        print(f'Max absolute difference between models outputs {diff}')
        assert diff <= 2e-2, f'Error, model absolute difference is >2e-2: {diff}'

    # Save pytorch-model
    print(f'Save TensorFlow model to {tf_dump_path}')
    tf_model.save_weights(tf_dump_path, save_format="h5")
def convert_all_pt_checkpoints_to_tf(
    args_model_type,
    tf_dump_path,
    model_shortcut_names_or_path=None,
    config_shortcut_names_or_path=None,
    compare_with_pt_model=False,
    use_cached_models=False,
    remove_cached_files=False,
    only_convert_finetuned_models=False,
):
    if args_model_type is None:
        model_types = list(MODEL_CLASSES.keys())
    else:
        model_types = [args_model_type]

    for j, model_type in enumerate(model_types, start=1):
        print("=" * 100)
        print(f' Converting model type {j}/{len(model_types)}: {model_type}')
        print("=" * 100)
        if model_type not in MODEL_CLASSES:
            raise ValueError(f'Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys())}.')

        config_class, model_class, pt_model_class, aws_model_maps, aws_config_map = MODEL_CLASSES[model_type]

        if model_shortcut_names_or_path is None:
            model_shortcut_names_or_path = list(aws_model_maps.keys())
        if config_shortcut_names_or_path is None:
            config_shortcut_names_or_path = model_shortcut_names_or_path

        for i, (model_shortcut_name, config_shortcut_name) in enumerate(
            zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1
        ):
            print("-" * 100)
            if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
                if not only_convert_finetuned_models:
                    print(f'    Skipping finetuned checkpoint {model_shortcut_name}')
                    continue
                model_type = model_shortcut_name
            elif only_convert_finetuned_models:
                print(f'    Skipping not finetuned checkpoint {model_shortcut_name}')
                continue
            print(
                f'    Converting checkpoint {i}/{len(model_shortcut_names_or_path)}: {model_shortcut_name} - model_type {model_type}'
            )
            print("-" * 100)

            if config_shortcut_name in aws_config_map:
                config_file = cached_file(config_shortcut_name, CONFIG_NAME, force_download=not use_cached_models)
            else:
                config_file = config_shortcut_name

            if model_shortcut_name in aws_model_maps:
                model_file = cached_file(model_shortcut_name, WEIGHTS_NAME, force_download=not use_cached_models)
            else:
                model_file = model_shortcut_name

            if os.path.isfile(model_shortcut_name):
                model_shortcut_name = "converted_model"

            convert_pt_checkpoint_to_tf(
                model_type=model_type,
                pytorch_checkpoint_path=model_file,
                config_file=config_file,
                tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"),
                compare_with_pt_model=compare_with_pt_model,
            )
            if remove_cached_files:
                os.remove(config_file)
                os.remove(model_file)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_dump_path', default=None, type=str, required=True, help='Path to the output Tensorflow dump file.'
)
parser.add_argument(
'--model_type',
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
'convert all the models from AWS.'
),
)
parser.add_argument(
'--pytorch_checkpoint_path',
default=None,
type=str,
help=(
'Path to the PyTorch checkpoint path or shortcut name to download from AWS. '
'If not given, will download and convert all the checkpoints from AWS.'
),
)
parser.add_argument(
'--config_file',
default=None,
type=str,
help=(
'The config json file corresponding to the pre-trained model. \n'
'This specifies the model architecture. If not given and '
'--pytorch_checkpoint_path is not given or is a shortcut name '
'use the configuration associated to the shortcut name on the AWS'
),
)
parser.add_argument(
'--compare_with_pt_model', action='store_true', help='Compare Tensorflow and PyTorch model predictions.'
)
parser.add_argument(
'--use_cached_models',
action='store_true',
help='Use cached models if possible instead of updating to latest checkpoint versions.',
)
parser.add_argument(
'--remove_cached_files',
action='store_true',
help='Remove pytorch models after conversion (save memory when converting in batches).',
)
parser.add_argument('--only_convert_finetuned_models', action='store_true', help='Only convert finetuned models.')
    args = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
def _lowerCAmelCase ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
__snake_case = set()
# Replace all the whitespace in our sentence
__snake_case = input_str.replace(" " , "" )
for alpha in input_str:
if "a" <= alpha.lower() <= "z":
frequency.add(alpha.lower() )
return len(_lowerCAmelCase ) == 26
def _lowerCAmelCase ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
__snake_case = [False] * 26
for char in input_str:
if char.islower():
__snake_case = True
elif char.isupper():
__snake_case = True
return all(_lowerCAmelCase )
def _lowerCAmelCase ( _lowerCAmelCase = "The quick brown fox jumps over the lazy dog" , ) -> bool:
'''simple docstring'''
return len({char for char in input_str.lower() if char.isalpha()} ) == 26
def _lowerCAmelCase ( ) -> None:
'''simple docstring'''
from timeit import timeit
__snake_case = "from __main__ import is_pangram, is_pangram_faster, is_pangram_fastest"
print(timeit("is_pangram()" , setup=_lowerCAmelCase ) )
print(timeit("is_pangram_faster()" , setup=_lowerCAmelCase ) )
print(timeit("is_pangram_fastest()" , setup=_lowerCAmelCase ) )
# 5.348480500048026, 2.6477354579837993, 1.8470395830227062
# 5.036091582966037, 2.644472333951853, 1.8869528750656173
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import numpy as np
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForMaskedImageModeling,
HfArgumentParser,
Trainer,
TrainingArguments,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt")
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default="cifar10", metadata={"help": "Name of a dataset from the datasets package"}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    image_column_name: Optional[str] = field(
        default=None,
        metadata={"help": "The column name of the images in the files. If not set, will try to use 'image' or 'img'."},
    )
    train_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the training data."})
    validation_dir: Optional[str] = field(default=None, metadata={"help": "A folder containing the validation data."})
    train_val_split: Optional[float] = field(
        default=0.15, metadata={"help": "Percent to split off of train for validation."}
    )
    mask_patch_size: int = field(default=32, metadata={"help": "The size of the square patches to use for masking."})
    mask_ratio: float = field(
        default=0.6,
        metadata={"help": "Percentage of patches to mask."},
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )

    def __post_init__(self):
        data_files = {}
        if self.train_dir is not None:
            data_files["train"] = self.train_dir
        if self.validation_dir is not None:
            data_files["validation"] = self.validation_dir
        self.data_files = data_files if data_files else None
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/image processor we are going to pre-train.
    """

    model_name_or_path: str = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a "
                "checkpoint identifier on the hub. "
                "Don't set if you want to train a model from scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name_or_path: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    config_overrides: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "Override some existing default config settings when a model is trained from scratch. Example: "
                "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
            )
        },
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store (cache) the pretrained models/datasets downloaded from the hub"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."})
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    image_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each image. If not specified, will use `image_size` of the configuration."
            )
        },
    )
    patch_size: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration."
            )
        },
    )
    encoder_stride: Optional[int] = field(
        default=None,
        metadata={"help": "Stride to use for the encoder."},
    )
class MaskGenerator:
    """
    A class to generate boolean masks for the pretraining task.

    A mask is a 1D tensor of shape (model_patch_size**2,) where the value is either 0 or 1,
    where 1 indicates "masked".
    """

    def __init__(self, input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6):
        self.input_size = input_size
        self.mask_patch_size = mask_patch_size
        self.model_patch_size = model_patch_size
        self.mask_ratio = mask_ratio

        if self.input_size % self.mask_patch_size != 0:
            raise ValueError('Input size must be divisible by mask patch size')
        if self.mask_patch_size % self.model_patch_size != 0:
            raise ValueError('Mask patch size must be divisible by model patch size')

        self.rand_size = self.input_size // self.mask_patch_size
        self.scale = self.mask_patch_size // self.model_patch_size

        self.token_count = self.rand_size**2
        self.mask_count = int(np.ceil(self.token_count * self.mask_ratio))

    def __call__(self):
        mask_idx = np.random.permutation(self.token_count)[: self.mask_count]
        mask = np.zeros(self.token_count, dtype=int)
        mask[mask_idx] = 1

        mask = mask.reshape((self.rand_size, self.rand_size))
        mask = mask.repeat(self.scale, axis=0).repeat(self.scale, axis=1)

        return torch.tensor(mask.flatten())
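# Illustrative sanity check (not part of the original script): with the defaults the
# generator yields a flat mask of (input_size / model_patch_size)**2 entries, of which
# roughly mask_ratio are set to 1 (rounded up per 32px mask patch).
def _demo_mask_generator():  # hypothetical helper; call manually to try it
    gen = MaskGenerator(input_size=192, mask_patch_size=32, model_patch_size=4, mask_ratio=0.6)
    mask = gen()
    assert mask.shape == ((192 // 4) ** 2,)  # 2304 model-patch tokens
    return float(mask.float().mean())  # ~0.611 = ceil(36 * 0.6) / 36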
def collate_fn(examples):
    pixel_values = torch.stack([example['pixel_values'] for example in examples])
    mask = torch.stack([example['mask'] for example in examples])
    return {"pixel_values": pixel_values, "bool_masked_pos": mask}
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith('.json'):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry('run_mim', model_args, data_args)

    # Setup logging
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        datefmt='%m/%d/%Y %H:%M:%S',
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
        + f' distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    logger.info(f'Training/evaluation parameters {training_args}')

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f'Output directory ({training_args.output_dir}) already exists and is not empty. '
                'Use --overwrite_output_dir to overcome.'
            )
        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(
                f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
                'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.'
            )

    # Initialize our dataset.
    ds = load_dataset(
        data_args.dataset_name,
        data_args.dataset_config_name,
        data_files=data_args.data_files,
        cache_dir=model_args.cache_dir,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # If we don't have a validation split, split off a percentage of train as validation.
    data_args.train_val_split = None if 'validation' in ds.keys() else data_args.train_val_split
    if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0:
        split = ds['train'].train_test_split(data_args.train_val_split)
        ds['train'] = split['train']
        ds['validation'] = split['test']

    # Create config
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config_kwargs = {
        'cache_dir': model_args.cache_dir,
        'revision': model_args.model_revision,
        'use_auth_token': True if model_args.use_auth_token else None,
    }
    if model_args.config_name_or_path:
        config = AutoConfig.from_pretrained(model_args.config_name_or_path, **config_kwargs)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning('You are instantiating a new config instance from scratch.')
        if model_args.config_overrides is not None:
            logger.info(f'Overriding config: {model_args.config_overrides}')
            config.update_from_string(model_args.config_overrides)
            logger.info(f'New config: {config}')

    # make sure the decoder_type is "simmim" (only relevant for BEiT)
    if hasattr(config, 'decoder_type'):
        config.decoder_type = 'simmim'

    # adapt config
    model_args.image_size = model_args.image_size if model_args.image_size is not None else config.image_size
    model_args.patch_size = model_args.patch_size if model_args.patch_size is not None else config.patch_size
    model_args.encoder_stride = (
        model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride
    )

    config.update(
        {
            'image_size': model_args.image_size,
            'patch_size': model_args.patch_size,
            'encoder_stride': model_args.encoder_stride,
        }
    )

    # create image processor
    if model_args.image_processor_name:
        image_processor = AutoImageProcessor.from_pretrained(model_args.image_processor_name, **config_kwargs)
    elif model_args.model_name_or_path:
        image_processor = AutoImageProcessor.from_pretrained(model_args.model_name_or_path, **config_kwargs)
    else:
        IMAGE_PROCESSOR_TYPES = {
            conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items()
        }
        image_processor = IMAGE_PROCESSOR_TYPES[model_args.model_type]()

    # create model
    if model_args.model_name_or_path:
        model = AutoModelForMaskedImageModeling.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool('.ckpt' in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        logger.info('Training new model from scratch')
        model = AutoModelForMaskedImageModeling.from_config(config)

    if training_args.do_train:
        column_names = ds['train'].column_names
    else:
        column_names = ds['validation'].column_names

    if data_args.image_column_name is not None:
        image_column_name = data_args.image_column_name
    elif 'image' in column_names:
        image_column_name = 'image'
    elif 'img' in column_names:
        image_column_name = 'img'
    else:
        image_column_name = column_names[0]

    # transformations as done in original SimMIM paper
    # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py
    transforms = Compose(
        [
            Lambda(lambda img: img.convert('RGB') if img.mode != "RGB" else img),
            RandomResizedCrop(model_args.image_size, scale=(0.67, 1.0), ratio=(3.0 / 4.0, 4.0 / 3.0)),
            RandomHorizontalFlip(),
            ToTensor(),
            Normalize(mean=image_processor.image_mean, std=image_processor.image_std),
        ]
    )

    # create mask generator
    mask_generator = MaskGenerator(
        input_size=model_args.image_size,
        mask_patch_size=data_args.mask_patch_size,
        model_patch_size=model_args.patch_size,
        mask_ratio=data_args.mask_ratio,
    )

    def preprocess_images(examples):
        """Preprocess a batch of images by applying transforms + creating a corresponding mask, indicating
        which patches to mask."""
        examples['pixel_values'] = [transforms(image) for image in examples[image_column_name]]
        examples['mask'] = [mask_generator() for i in range(len(examples[image_column_name]))]

        return examples

    if training_args.do_train:
        if 'train' not in ds:
            raise ValueError('--do_train requires a train dataset')
        if data_args.max_train_samples is not None:
            ds['train'] = ds['train'].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples))
        # Set the training transforms
        ds['train'].set_transform(preprocess_images)

    if training_args.do_eval:
        if 'validation' not in ds:
            raise ValueError('--do_eval requires a validation dataset')
        if data_args.max_eval_samples is not None:
            ds['validation'] = (
                ds['validation'].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples))
            )
        # Set the validation transforms
        ds['validation'].set_transform(preprocess_images)

    # Initialize our trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=ds['train'] if training_args.do_train else None,
        eval_dataset=ds['validation'] if training_args.do_eval else None,
        tokenizer=image_processor,
        data_collator=collate_fn,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        trainer.log_metrics('train', train_result.metrics)
        trainer.save_metrics('train', train_result.metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        metrics = trainer.evaluate()
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)

    # Write model card and (optionally) push to hub
    kwargs = {
        'finetuned_from': model_args.model_name_or_path,
        'tasks': 'masked-image-modeling',
        'dataset': data_args.dataset_name,
        'tags': ['masked-image-modeling'],
    }
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)


if __name__ == "__main__":
    main()
| 56
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__magic_name__ : List[str] = {
"""configuration_m2m_100""": ["""M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP""", """M2M100Config""", """M2M100OnnxConfig"""],
"""tokenization_m2m_100""": ["""M2M100Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ : Union[str, Any] = [
"""M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""M2M100ForConditionalGeneration""",
"""M2M100Model""",
"""M2M100PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_m2m_100 import (
M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
M2M100ForConditionalGeneration,
M2M100Model,
M2M100PreTrainedModel,
)
else:
import sys
__magic_name__ : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
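# For reference, a minimal sketch (an assumption, not the actual `_LazyModule`
# internals) of the lazy-import pattern used above, via PEP 562 module-level
# `__getattr__`: an attribute lookup triggers the submodule import on first access.
#
# import importlib
#
# _name_to_module = {name: mod for mod, names in _import_structure.items() for name in names}
#
# def __getattr__(name):
#     if name in _name_to_module:
#         module = importlib.import_module("." + _name_to_module[name], __name__)
#         return getattr(module, name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")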
| 615
| 0
|
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class __magic_name__ ( SchedulerCommonTest ):
"""simple docstring"""
_UpperCamelCase = (CMStochasticIterativeScheduler,)
_UpperCamelCase = 10
def _UpperCAmelCase ( self , **a__ ):
config = {
'''num_train_timesteps''': 2_01,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
config.update(**a__ )
return config
def _UpperCAmelCase ( self ):
_lowerCamelCase = 10
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = self.scheduler_classes[0](**a__ )
scheduler.set_timesteps(a__ )
_lowerCamelCase = scheduler.timesteps[0]
_lowerCamelCase = scheduler.timesteps[1]
_lowerCamelCase = self.dummy_sample
_lowerCamelCase = 0.1 * sample
_lowerCamelCase = scheduler.step(a__ , a__ , a__ ).prev_sample
_lowerCamelCase = scheduler.step(a__ , a__ , a__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=a__ )
def _UpperCAmelCase ( self ):
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**a__ )
_lowerCamelCase = 1
scheduler.set_timesteps(a__ )
_lowerCamelCase = scheduler.timesteps
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(a__ ):
# 1. scale model input
_lowerCamelCase = scheduler.scale_model_input(a__ , a__ )
# 2. predict noise residual
_lowerCamelCase = model(a__ , a__ )
# 3. predict previous sample x_t-1
_lowerCamelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ).prev_sample
_lowerCamelCase = pred_prev_sample
_lowerCamelCase = torch.sum(torch.abs(a__ ) )
_lowerCamelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 192.7614 ) < 1E-2
assert abs(result_mean.item() - 0.2510 ) < 1E-3
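# The loop above follows the generic diffusers sampling contract; as a sketch
# (assuming any scheduler/model pair with compatible shapes):
#
#     for t in scheduler.timesteps:
#         scaled = scheduler.scale_model_input(sample, t)            # 1. rescale input
#         residual = model(scaled, t)                                # 2. predict residual
#         sample = scheduler.step(residual, t, sample).prev_sample   # 3. x_t -> x_{t-1}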
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**a__ )
_lowerCamelCase = [1_06, 0]
scheduler.set_timesteps(timesteps=a__ )
_lowerCamelCase = scheduler.timesteps
_lowerCamelCase = torch.manual_seed(0 )
_lowerCamelCase = self.dummy_model()
_lowerCamelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_lowerCamelCase = scheduler.scale_model_input(a__ , a__ )
# 2. predict noise residual
_lowerCamelCase = model(a__ , a__ )
# 3. predict previous sample x_t-1
_lowerCamelCase = scheduler.step(a__ , a__ , a__ , generator=a__ ).prev_sample
_lowerCamelCase = pred_prev_sample
_lowerCamelCase = torch.sum(torch.abs(a__ ) )
_lowerCamelCase = torch.mean(torch.abs(a__ ) )
assert abs(result_sum.item() - 347.6357 ) < 1E-2
assert abs(result_mean.item() - 0.4527 ) < 1E-3
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**a__ )
_lowerCamelCase = [39, 30, 12, 15, 0]
with self.assertRaises(a__ , msg='''`timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**a__ )
_lowerCamelCase = [39, 30, 12, 1, 0]
_lowerCamelCase = len(a__ )
with self.assertRaises(a__ , msg='''Can only pass one of `num_inference_steps` or `timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=a__ , timesteps=a__ )
def _UpperCAmelCase ( self ):
_lowerCamelCase = self.scheduler_classes[0]
_lowerCamelCase = self.get_scheduler_config()
_lowerCamelCase = scheduler_class(**a__ )
_lowerCamelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a__ , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=a__ )
| 297
|
from maths.prime_factors import prime_factors
def _lowerCamelCase ( _a ):
"""simple docstring"""
if not isinstance(_a , int ):
_lowerCamelCase = F'''Input value of [number={_a}] must be an integer'''
raise TypeError(_lowerCamelCase )
if _a < 1:
raise ValueError('''Input must be a positive integer''' )
factors = prime_factors(_a )
# mu(n) is 0 when n is not square-free
return 0 if len(set(factors ) ) != len(factors ) else (-1 if len(factors ) % 2 else 1)
if __name__ == "__main__":
import doctest
doctest.testmod()
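# Worked values for the Mobius function above:
#   mobius(1) -> 1   (zero prime factors, even count)
#   mobius(2) -> -1  (one prime factor)
#   mobius(4) -> 0   (2 * 2 is not square-free)
#   mobius(6) -> 1   (2 * 3, two distinct prime factors)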
| 297
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester ( ConfigTester ):
"""simple docstring"""
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
snake_case_ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__lowerCamelCase , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , """num_attention_heads""" ) )
self.parent.assertTrue(hasattr(__lowerCamelCase , """num_encoder_blocks""" ) )
class SegformerModelTester :
"""simple docstring"""
def __init__( self , parent , batch_size=1_3 , image_size=6_4 , num_channels=3 , num_encoder_blocks=4 , depths=[2, 2, 2, 2] , sr_ratios=[8, 4, 2, 1] , hidden_sizes=[1_6, 3_2, 6_4, 1_2_8] , downsampling_rates=[1, 4, 8, 1_6] , num_attention_heads=[1, 2, 4, 8] , is_training=True , use_labels=True , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , initializer_range=0.02 , num_labels=3 , scope=None , ) -> str:
'''simple docstring'''
snake_case_ : str = parent
snake_case_ : List[str] = batch_size
snake_case_ : List[Any] = image_size
snake_case_ : Optional[Any] = num_channels
snake_case_ : str = num_encoder_blocks
snake_case_ : Optional[Any] = sr_ratios
snake_case_ : str = depths
snake_case_ : Optional[int] = hidden_sizes
snake_case_ : int = downsampling_rates
snake_case_ : Dict = num_attention_heads
snake_case_ : int = is_training
snake_case_ : Any = use_labels
snake_case_ : Optional[int] = hidden_act
snake_case_ : List[Any] = hidden_dropout_prob
snake_case_ : Any = attention_probs_dropout_prob
snake_case_ : Optional[Any] = initializer_range
snake_case_ : str = num_labels
snake_case_ : Union[str, Any] = scope
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ : Optional[Any] = None
if self.use_labels:
snake_case_ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case_ : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self ) -> List[Any]:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self , config , pixel_values , labels ) -> Dict:
'''simple docstring'''
snake_case_ : Optional[int] = SegformerModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case_ : Any = model(__lowerCamelCase )
snake_case_ : Optional[int] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def UpperCAmelCase__ ( self , config , pixel_values , labels ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Dict = self.num_labels
snake_case_ : Tuple = SegformerForSemanticSegmentation(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case_ : List[Any] = model(__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
snake_case_ : Dict = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCAmelCase__ ( self , config , pixel_values , labels ) -> str:
'''simple docstring'''
snake_case_ : str = 1
snake_case_ : Tuple = SegformerForSemanticSegmentation(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
snake_case_ : Tuple = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(__lowerCamelCase )
snake_case_ : int = model(__lowerCamelCase , labels=__lowerCamelCase )
self.parent.assertGreater(result.loss , 0.0 )
def UpperCAmelCase__ ( self ) -> Optional[Any]:
'''simple docstring'''
snake_case_ : int = self.prepare_config_and_inputs()
snake_case_ , snake_case_ , snake_case_ : Any = config_and_inputs
snake_case_ : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
_lowerCamelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = False
_lowerCamelCase = False
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Tuple = SegformerModelTester(self )
snake_case_ : List[Any] = SegformerConfigTester(self , config_class=__lowerCamelCase )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCAmelCase__ ( self ) -> Tuple:
'''simple docstring'''
snake_case_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__lowerCamelCase )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__lowerCamelCase )
@unittest.skip("""SegFormer does not use inputs_embeds""" )
def UpperCAmelCase__ ( self ) -> Union[str, Any]:
'''simple docstring'''
pass
@unittest.skip("""SegFormer does not have get_input_embeddings method and get_output_embeddings methods""" )
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
pass
def UpperCAmelCase__ ( self ) -> int:
'''simple docstring'''
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : List[Any] = model_class(__lowerCamelCase )
snake_case_ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ : int = [*signature.parameters.keys()]
snake_case_ : str = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCamelCase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
snake_case_ , snake_case_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : str = True
for model_class in self.all_model_classes:
snake_case_ : str = True
snake_case_ : Tuple = False
snake_case_ : str = True
snake_case_ : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case_ : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
snake_case_ : str = outputs.attentions
snake_case_ : Optional[int] = sum(self.model_tester.depths )
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case_ : Dict = True
snake_case_ : int = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case_ : Tuple = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
snake_case_ : List[str] = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first attentions (first block, first layer)
snake_case_ : Dict = (self.model_tester.image_size // 4) ** 2
snake_case_ : Optional[Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
snake_case_ : Optional[Any] = (self.model_tester.image_size // 3_2) ** 2
snake_case_ : Optional[Any] = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
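# Worked numbers for the shape checks above, using this tester's defaults
# (image_size=64, sr_ratios=[8, 4, 2, 1]):
#   first block: expected_seq_len = (64 // 4) ** 2 = 256,
#                expected_reduced_seq_len = (64 // (4 * 8)) ** 2 = 4
#   last block:  expected_seq_len = (64 // 32) ** 2 = 4,
#                expected_reduced_seq_len = (64 // (32 * 1)) ** 2 = 4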
snake_case_ : int = len(__lowerCamelCase )
# Check attention is always last and order is fine
snake_case_ : List[Any] = True
snake_case_ : Dict = True
snake_case_ : str = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case_ : Dict = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
self.assertEqual(out_len + 1 , len(__lowerCamelCase ) )
snake_case_ : Tuple = outputs.attentions
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first attentions (first block, first layer)
snake_case_ : Dict = (self.model_tester.image_size // 4) ** 2
snake_case_ : Any = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def UpperCAmelCase__ ( self ) -> Any:
'''simple docstring'''
def check_hidden_states_output(inputs_dict , config , model_class ):
snake_case_ : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
with torch.no_grad():
snake_case_ : Optional[Any] = model(**self._prepare_for_class(__lowerCamelCase , __lowerCamelCase ) )
snake_case_ : Dict = outputs.hidden_states
snake_case_ : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(__lowerCamelCase ) , __lowerCamelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
snake_case_ , snake_case_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ : Tuple = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case_ : List[str] = True
check_hidden_states_output(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
def UpperCAmelCase__ ( self ) -> Dict:
'''simple docstring'''
if not self.model_tester.is_training:
return
snake_case_ , snake_case_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case_ : str = True
for model_class in self.all_model_classes:
if model_class in get_values(__lowerCamelCase ):
continue
snake_case_ : Dict = model_class(__lowerCamelCase )
model.to(__lowerCamelCase )
model.train()
snake_case_ : Optional[Any] = self._prepare_for_class(__lowerCamelCase , __lowerCamelCase , return_labels=__lowerCamelCase )
snake_case_ : List[str] = model(**__lowerCamelCase ).loss
loss.backward()
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
pass
@slow
def UpperCAmelCase__ ( self ) -> List[str]:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ : int = SegformerModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img ( ):
'''simple docstring'''
snake_case_ : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class _lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self ) -> Optional[int]:
'''simple docstring'''
snake_case_ : Optional[int] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=__lowerCamelCase , align=__lowerCamelCase , do_random_crop=__lowerCamelCase )
snake_case_ : Any = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__lowerCamelCase )
snake_case_ : Dict = prepare_img()
snake_case_ : str = image_processor(images=__lowerCamelCase , return_tensors="""pt""" )
snake_case_ : Optional[int] = encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
snake_case_ : Optional[Any] = model(__lowerCamelCase )
snake_case_ : Dict = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
snake_case_ : str = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __lowerCamelCase , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : List[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=__lowerCamelCase , align=__lowerCamelCase , do_random_crop=__lowerCamelCase )
snake_case_ : Any = SegformerForSemanticSegmentation.from_pretrained(
"""nvidia/segformer-b1-finetuned-cityscapes-1024-1024""" ).to(__lowerCamelCase )
snake_case_ : Optional[Any] = prepare_img()
snake_case_ : List[Any] = image_processor(images=__lowerCamelCase , return_tensors="""pt""" )
snake_case_ : Optional[Any] = encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
snake_case_ : str = model(__lowerCamelCase )
snake_case_ : List[str] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , __lowerCamelCase )
snake_case_ : Optional[Any] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , __lowerCamelCase , atol=1E-1 ) )
@slow
def UpperCAmelCase__ ( self ) -> str:
'''simple docstring'''
snake_case_ : Optional[Any] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=__lowerCamelCase , align=__lowerCamelCase , do_random_crop=__lowerCamelCase )
snake_case_ : Dict = SegformerForSemanticSegmentation.from_pretrained("""nvidia/segformer-b0-finetuned-ade-512-512""" ).to(
__lowerCamelCase )
snake_case_ : str = prepare_img()
snake_case_ : Tuple = image_processor(images=__lowerCamelCase , return_tensors="""pt""" )
snake_case_ : List[Any] = encoded_inputs.pixel_values.to(__lowerCamelCase )
with torch.no_grad():
snake_case_ : Tuple = model(__lowerCamelCase )
snake_case_ : Union[str, Any] = outputs.logits.detach().cpu()
snake_case_ : Tuple = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase , target_sizes=[(5_0_0, 3_0_0)] )
snake_case_ : Dict = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , __lowerCamelCase )
snake_case_ : int = image_processor.post_process_semantic_segmentation(outputs=__lowerCamelCase )
snake_case_ : List[str] = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , __lowerCamelCase )
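# The `post_process_semantic_segmentation` call above is roughly equivalent to
# this manual post-processing (a sketch, assuming `outputs.logits` of shape
# (batch, num_labels, h, w) and one (height, width) target size per image):
#
# logits = torch.nn.functional.interpolate(
#     outputs.logits, size=(5_0_0, 3_0_0), mode="bilinear", align_corners=False
# )
# segmentation_map = logits.argmax(dim=1)[0]  # (500, 300) tensor of label ids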
| 58
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a__: List[Any] = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__: Any = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
a__: Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 190
| 0
|
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class UpperCamelCase ( ABC ):
@staticmethod
@abstractmethod
def _lowercase (_A : ArgumentParser) -> Tuple:
raise NotImplementedError()
@abstractmethod
def _lowercase (self : Any) -> Optional[Any]:
raise NotImplementedError()
| 703
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a : Optional[Any]= {
"configuration_wav2vec2": ["WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config"],
"feature_extraction_wav2vec2": ["Wav2Vec2FeatureExtractor"],
"processing_wav2vec2": ["Wav2Vec2Processor"],
"tokenization_wav2vec2": ["Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[Any]= [
"WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Wav2Vec2ForAudioFrameClassification",
"Wav2Vec2ForCTC",
"Wav2Vec2ForMaskedLM",
"Wav2Vec2ForPreTraining",
"Wav2Vec2ForSequenceClassification",
"Wav2Vec2ForXVector",
"Wav2Vec2Model",
"Wav2Vec2PreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str]= [
"TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFWav2Vec2ForCTC",
"TFWav2Vec2Model",
"TFWav2Vec2PreTrainedModel",
"TFWav2Vec2ForSequenceClassification",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[Any]= [
"FlaxWav2Vec2ForCTC",
"FlaxWav2Vec2ForPreTraining",
"FlaxWav2Vec2Model",
"FlaxWav2Vec2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_wav2vec2 import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config
from .feature_extraction_wav2vec2 import Wav2Vec2FeatureExtractor
from .processing_wav2vec2 import Wav2Vec2Processor
from .tokenization_wav2vec2 import Wav2Vec2CTCTokenizer, Wav2Vec2Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wav2vec2 import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
Wav2Vec2ForAudioFrameClassification,
Wav2Vec2ForCTC,
Wav2Vec2ForMaskedLM,
Wav2Vec2ForPreTraining,
Wav2Vec2ForSequenceClassification,
Wav2Vec2ForXVector,
Wav2Vec2Model,
Wav2Vec2PreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wav2vec2 import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWav2Vec2ForCTC,
TFWav2Vec2ForSequenceClassification,
TFWav2Vec2Model,
TFWav2Vec2PreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wav2vec2 import (
FlaxWav2Vec2ForCTC,
FlaxWav2Vec2ForPreTraining,
FlaxWav2Vec2Model,
FlaxWav2Vec2PreTrainedModel,
)
else:
import sys
_a : Tuple= _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 192
| 0
|
def dfs(u , graph , visited_edge , path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v] , visited_edge[v][u] = True, True
            path = dfs(v , graph , visited_edge , path)
    return path
def check_circuit_or_path(graph , max_node):
    # returns 1 for an Euler circuit, 2 for an Euler path, 3 for neither
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node
def check_euler(graph , max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check , odd_node = check_circuit_or_path(graph , max_node)
    if check == 3:
        print('graph is not Eulerian')
        print('no path')
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print('graph has a Euler path')
    if check == 1:
        print('graph has a Euler cycle')
    path = dfs(start_node , graph , visited_edge)
    print(path)
def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: []
        # all degrees are zero
    }
    max_node = 10
    check_euler(g1 , max_node)
    check_euler(g2 , max_node)
    check_euler(g3 , max_node)
    check_euler(g4 , max_node)
    check_euler(g5 , max_node)
if __name__ == "__main__":
    main()
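# Worked classification for the example graphs above (degree parity decides):
#   g1: degrees 3,2,2,2,1 -> two odd-degree nodes  -> Euler path (check == 2)
#   g2: degrees 4,2,2,2,2 -> no odd-degree node    -> Euler cycle (check == 1)
#   g3: degrees 3,3,2,3,1 -> four odd-degree nodes -> not Eulerian (check == 3)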
| 198
|
'''simple docstring'''
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("""1.6"""):
__a: Optional[Any] = True
from torch.cuda.amp import autocast
__a: Optional[Any] = logging.getLogger(__name__)
@dataclass
class ModelArguments :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Whether to log verbose messages or not."} , )
SCREAMING_SNAKE_CASE = field(
default=2.0 , metadata={"help": "Maximum temperature for gumbel softmax."} )
SCREAMING_SNAKE_CASE = field(
default=0.5 , metadata={"help": "Minimum temperature for gumbel softmax."} )
SCREAMING_SNAKE_CASE = field(
default=0.999995 , metadata={"help": "Decay of gumbel temperature during training."} )
def __UpperCamelCase ( model_args , training_args ):
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
lowercase__ : int = logging.WARNING
if model_args.verbose_logging:
lowercase__ : str = logging.DEBUG
elif trainer_utils.is_main_process(training_args.local_rank ):
lowercase__ : List[Any] = logging.INFO
logger.setLevel(UpperCAmelCase )
@dataclass
class DataTrainingArguments :
'''simple docstring'''
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
SCREAMING_SNAKE_CASE = field(
default="train" , metadata={
"help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
} , )
SCREAMING_SNAKE_CASE = field(
default="validation" , metadata={
"help": (
"The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
)
} , )
SCREAMING_SNAKE_CASE = field(
default="file" , metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
SCREAMING_SNAKE_CASE = field(
default=1 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
SCREAMING_SNAKE_CASE = field(
default=a__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
SCREAMING_SNAKE_CASE = field(
default=20.0 , metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"} )
@dataclass
class DataCollatorForWavaVecaPretraining :
'''simple docstring'''
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = "longest"
SCREAMING_SNAKE_CASE = None
SCREAMING_SNAKE_CASE = None
def __call__( self , __lowerCAmelCase ) -> Dict[str, torch.Tensor]:
# reformat list to dict and set to pytorch format
lowercase__ : List[str] = self.feature_extractor.pad(
__lowerCAmelCase , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
lowercase__ : List[str] = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] )
lowercase__ : Optional[Any] = batch['''input_values'''].shape[0]
# make sure that no loss is computed on padded inputs
if batch["attention_mask"] is not None:
# compute real output lengths according to convolution formula
lowercase__ : str = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to(
torch.long )
lowercase__ : int = torch.zeros(
(batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device )
# these two operations make sure that all values
# before the output length indices are attended to
lowercase__ : Any = 1
lowercase__ : Dict = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool()
# sample randomly masked indices
lowercase__ : List[Any] = _compute_mask_indices(
(batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=__lowerCAmelCase , min_masks=2 , )
return batch
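# `_compute_mask_indices` (imported above from `modeling_wav2vec2`) samples
# SpecAugment-style spans. A toy sketch of the idea -- an assumption about the
# real helper, which additionally handles padding and minimum-mask constraints:
#
# import numpy as np
#
# def toy_mask_indices(shape, mask_prob, mask_length):
#     batch_size, seq_len = shape
#     mask = np.zeros(shape, dtype=bool)
#     num_spans = int(mask_prob * seq_len / mask_length)
#     for b in range(batch_size):
#         starts = np.random.choice(seq_len - mask_length, num_spans, replace=False)
#         for start in starts:
#             mask[b, start : start + mask_length] = True
#     return mask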
class WavaVecaPreTrainer ( Trainer ):
'''simple docstring'''
def __init__( self , *args , max_gumbel_temp=1 , min_gumbel_temp=0 , gumbel_temp_decay=1.0 , **kwargs ) -> int:
super().__init__(*args , **kwargs )
lowercase__ : Union[str, Any] = 0
lowercase__ : List[str] = max_gumbel_temp
lowercase__ : Union[str, Any] = min_gumbel_temp
lowercase__ : List[Any] = gumbel_temp_decay
def _lowerCAmelCase( self , model , inputs ) -> torch.Tensor:
model.train()
inputs = self._prepare_inputs(inputs )
if self.use_amp:
with autocast():
loss = self.compute_loss(model , inputs )
else:
loss = self.compute_loss(model , inputs )
if self.args.n_gpu > 1 or self.deepspeed:
if model.module.config.ctc_loss_reduction == "mean":
lowercase__ : str = loss.mean()
elif model.module.config.ctc_loss_reduction == "sum":
lowercase__ : Union[str, Any] = loss.sum() / (inputs['''mask_time_indices''']).sum()
else:
raise ValueError(F"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" )
if self.args.gradient_accumulation_steps > 1:
lowercase__ : str = loss / self.args.gradient_accumulation_steps
if self.use_amp:
self.scaler.scale(loss ).backward()
elif self.use_apex:
with amp.scale_loss(loss , self.optimizer ) as scaled_loss:
scaled_loss.backward()
elif self.deepspeed:
self.deepspeed.backward(loss )
else:
loss.backward()
self.num_update_step += 1
# make sure gumbel softmax temperature is decayed
if self.args.n_gpu > 1 or self.deepspeed:
model.module.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
else:
model.set_gumbel_temperature(
max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) )
return loss.detach()
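# The gumbel temperature schedule above is a clipped exponential decay:
#   temperature(step) = max(max_gumbel_temp * gumbel_temp_decay ** step, min_gumbel_temp)
# e.g. with max=2.0, min=0.5 and decay=0.999995 (the defaults declared earlier),
# reaching the floor takes log(0.25) / log(0.999995), roughly 277,000 update steps.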
def __UpperCamelCase ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowercase__ : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowercase__ , lowercase__ , lowercase__ : Dict = parser.parse_args_into_dataclasses()
configure_logger(UpperCAmelCase , UpperCAmelCase )
# Downloading and loading a dataset from the hub.
lowercase__ : Union[str, Any] = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
if "validation" not in datasets.keys():
# make sure only "validation" and "train" keys remain
lowercase__ : Dict = DatasetDict()
lowercase__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""" , cache_dir=model_args.cache_dir , )
lowercase__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""" , cache_dir=model_args.cache_dir , )
else:
# make sure only "validation" and "train" keys remain
lowercase__ : Any = DatasetDict()
lowercase__ : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , )
lowercase__ : str = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""{data_args.train_split_name}""" , cache_dir=model_args.cache_dir , )
# only normalized-inputs-training is supported
lowercase__ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=UpperCAmelCase )
def prepare_dataset(batch ):
# check that all files have the correct sampling rate
batch['''speech'''] , batch['''sampling_rate'''] = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate )
return batch
# load audio files into numpy arrays
lowercase__ : int = datasets.map(
UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names )
# filter audio files that are too long
lowercase__ : List[Any] = vectorized_datasets.filter(
lambda data : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) )
def normalize(batch ):
return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate )
# normalize and transform to `BatchFeatures`
lowercase__ : Tuple = vectorized_datasets.map(
UpperCAmelCase , batched=UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , )
# pretraining is only supported for "newer" stable layer norm architecture
# apply_spec_augment has to be True, mask_feature_prob has to be 0.0
lowercase__ : Union[str, Any] = WavaVecaConfig.from_pretrained(
model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , )
if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
raise ValueError(
'''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and'''
''' ``config.feat_extract_norm="layer"``''' )
lowercase__ : int = WavaVecaForPreTraining(UpperCAmelCase )
lowercase__ : List[Any] = DataCollatorForWavaVecaPretraining(model=UpperCAmelCase , feature_extractor=UpperCAmelCase )
lowercase__ : Any = WavaVecaPreTrainer(
model=UpperCAmelCase , data_collator=UpperCAmelCase , args=UpperCAmelCase , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=UpperCAmelCase , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , )
trainer.train()
if __name__ == "__main__":
main()
| 152
| 0
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def __UpperCamelCase( _A : str ):
'''simple docstring'''
if not sentence:
return ""
UpperCAmelCase__ : Union[str, Any] = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
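# Worked examples for the capitalisation helper above:
#   "hello world" -> "Hello world"   (first character mapped via the lower-to-upper dict)
#   "123 hey"     -> "123 hey"       (non-letters fall through dict.get unchanged)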
| 496
|
'''simple docstring'''
def alternative_string_arrange( first_str : str , second_str : str ):
'''simple docstring'''
first_str_length : int = len(first_str )
second_str_length : int = len(second_str )
abs_length : int = (
first_str_length if first_str_length > second_str_length else second_str_length
)
output_list : list = []
for char_count in range(abs_length ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(_A )
if __name__ == "__main__":
print(alternative_string_arrange('AB', 'XYZ'), end=' ')
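# Worked trace for alternative_string_arrange('AB', 'XYZ'):
#   char_count 0 -> 'A', 'X'; char_count 1 -> 'B', 'Y'; char_count 2 -> 'Z' only
#   joined result: 'AXBYZ'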
| 496
| 1
|
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE : Union[str, Any] = {
"SenseTime/deformable-detr": "https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json",
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class _lowerCamelCase( PretrainedConfig ):
lowercase_ : Dict = """deformable_detr"""
lowercase_ : str = {
"""hidden_size""": """d_model""",
"""num_attention_heads""": """encoder_attention_heads""",
}
def __init__( self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=3_00, max_position_embeddings=10_24, encoder_layers=6, encoder_ffn_dim=10_24, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=10_24, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=2_56, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.0_2, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, num_feature_levels=4, encoder_n_points=4, decoder_n_points=4, two_stage=False, two_stage_num_proposals=3_00, with_box_refine=False, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.2_5, disable_custom_kernels=False, **kwargs, ) -> Dict:
"""simple docstring"""
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.')
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.')
_lowercase : str = CONFIG_MAPPING['resnet'](out_features=['stage4'])
elif isinstance(backbone_config, dict):
_lowercase : str = backbone_config.get('model_type')
_lowercase : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_lowercase : str = config_class.from_dict(backbone_config)
_lowercase : Dict = use_timm_backbone
_lowercase : Optional[Any] = backbone_config
_lowercase : List[str] = num_channels
_lowercase : Tuple = num_queries
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Dict = d_model
_lowercase : List[str] = encoder_ffn_dim
_lowercase : str = encoder_layers
_lowercase : Optional[int] = encoder_attention_heads
_lowercase : Tuple = decoder_ffn_dim
_lowercase : Any = decoder_layers
_lowercase : Optional[int] = decoder_attention_heads
_lowercase : Union[str, Any] = dropout
_lowercase : int = attention_dropout
_lowercase : Optional[Any] = activation_dropout
_lowercase : Dict = activation_function
_lowercase : int = init_std
_lowercase : List[str] = init_xavier_std
_lowercase : str = encoder_layerdrop
_lowercase : Optional[int] = auxiliary_loss
_lowercase : str = position_embedding_type
_lowercase : List[str] = backbone
_lowercase : List[str] = use_pretrained_backbone
_lowercase : List[str] = dilation
# deformable attributes
_lowercase : str = num_feature_levels
_lowercase : Optional[int] = encoder_n_points
_lowercase : Optional[int] = decoder_n_points
_lowercase : Union[str, Any] = two_stage
_lowercase : Optional[Any] = two_stage_num_proposals
_lowercase : List[str] = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('If two_stage is True, with_box_refine must be True.')
# Hungarian matcher
_lowercase : Dict = class_cost
_lowercase : Optional[Any] = bbox_cost
_lowercase : Dict = giou_cost
# Loss coefficients
_lowercase : str = mask_loss_coefficient
_lowercase : Dict = dice_loss_coefficient
_lowercase : Optional[int] = bbox_loss_coefficient
_lowercase : Optional[int] = giou_loss_coefficient
_lowercase : Tuple = eos_coefficient
_lowercase : List[str] = focal_alpha
_lowercase : Dict = disable_custom_kernels
super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.encoder_attention_heads
@property
def UpperCamelCase ( self) -> int:
"""simple docstring"""
return self.d_model
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Tuple = copy.deepcopy(self.__dict__)
if self.backbone_config is not None:
_lowercase : str = self.backbone_config.to_dict()
_lowercase : List[str] = self.__class__.model_type
return output
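# Usage sketch for the config above (values follow the `__init__` defaults; the
# aliasing relies on `PretrainedConfig` honouring `attribute_map`):
#
# config = DeformableDetrConfig()
# config.hidden_size == config.d_model                           # True, aliased to 256
# config.num_attention_heads == config.encoder_attention_heads   # True, aliased to 8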
| 89
|
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase__ ( PipelineTesterMixin , unittest.TestCase):
"""simple docstring"""
_A = AudioLDMPipeline
_A = TEXT_TO_AUDIO_PARAMS
_A = TEXT_TO_AUDIO_BATCH_PARAMS
_A = frozenset(
[
'num_inference_steps',
'num_waveforms_per_prompt',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
])
def _a (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(32, 64) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__a , )
lowerCamelCase = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__a , set_alpha_to_one=__a , )
torch.manual_seed(0 )
lowerCamelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
lowerCamelCase = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , )
lowerCamelCase = ClapTextModelWithProjection(__a )
lowerCamelCase = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=77 )
lowerCamelCase = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__a , )
lowerCamelCase = SpeechTaHifiGan(__a )
lowerCamelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def _a (self , device , seed=0 ):
'''simple docstring'''
if str(device ).startswith("mps" ):
lowerCamelCase = torch.manual_seed(seed )
else:
lowerCamelCase = torch.Generator(device=device ).manual_seed(seed )
lowerCamelCase = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 2_56
lowerCamelCase = audio[:10]
lowerCamelCase = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * [inputs["prompt"]]
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * [inputs.pop("prompt" )]
lowerCamelCase = audioldm_pipe.tokenizer(
__a , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
lowerCamelCase = text_inputs["input_ids"].to(__a )
lowerCamelCase = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase = F.normalize(__a , dim=-1 )
lowerCamelCase = prompt_embeds
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * ["this is a negative prompt"]
lowerCamelCase = negative_prompt
lowerCamelCase = 3 * [inputs["prompt"]]
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = 3 * [inputs.pop("prompt" )]
lowerCamelCase = []
for p in [prompt, negative_prompt]:
lowerCamelCase = audioldm_pipe.tokenizer(
__a , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__a , return_tensors="pt" , )
lowerCamelCase = text_inputs["input_ids"].to(__a )
lowerCamelCase = audioldm_pipe.text_encoder(
__a , )
lowerCamelCase = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
lowerCamelCase = F.normalize(__a , dim=-1 )
embeds.append(__a )
lowerCamelCase , lowerCamelCase = embeds
# forward
lowerCamelCase = audioldm_pipe(**__a )
lowerCamelCase = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = "egg cracking"
lowerCamelCase = audioldm_pipe(**__a , negative_prompt=__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) == 2_56
lowerCamelCase = audio[:10]
lowerCamelCase = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = PNDMScheduler(skip_prk_steps=__a )
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_56)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
lowerCamelCase = 2
lowerCamelCase = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_56)
# test num_waveforms_per_prompt for single prompt
lowerCamelCase = 2
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_56)
# test num_waveforms_per_prompt for batch of prompts
lowerCamelCase = 2
lowerCamelCase = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__a ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56)
def _a (self ):
'''simple docstring'''
lowerCamelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = audioldm_pipe.vocoder.config.sampling_rate
lowerCamelCase = self.get_dummy_inputs(__a )
lowerCamelCase = audioldm_pipe(audio_length_in_s=0.016 , **__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.016
lowerCamelCase = audioldm_pipe(audio_length_in_s=0.032 , **__a )
lowerCamelCase = output.audios[0]
assert audio.ndim == 1
assert len(__a ) / vocoder_sampling_rate == 0.032
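# Worked numbers for the duration checks above: the dummy vocoder is built with
# sampling_rate=16000, so
#   audio_length_in_s=0.016 -> 0.016 * 16000 = 256 samples
#   audio_length_in_s=0.032 -> 0.032 * 16000 = 512 samples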
def _a (self ):
'''simple docstring'''
lowerCamelCase = self.get_dummy_components()
lowerCamelCase = AudioLDMPipeline(**__a )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = ["hey"]
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase = output.audios.shape
assert audio_shape == (1, 2_56)
lowerCamelCase = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
lowerCamelCase = SpeechTaHifiGan(__a ).to(__a )
lowerCamelCase = audioldm_pipe(__a , num_inference_steps=1 )
lowerCamelCase = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_56)
def _a (self ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__a )
def _a (self ):
'''simple docstring'''
self._test_inference_batch_single_identical(test_mean_pixel_difference=__a )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _a (self ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__a )
@slow
class lowerCamelCase__ ( unittest.TestCase):
"""simple docstring"""
def _a (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a (self , __a , __a="cpu" , __a=torch.floataa , __a=0 ):
'''simple docstring'''
lowerCamelCase = torch.Generator(device=__a ).manual_seed(__a )
lowerCamelCase = np.random.RandomState(__a ).standard_normal((1, 8, 1_28, 16) )
lowerCamelCase = torch.from_numpy(__a ).to(device=__a , dtype=__a )
lowerCamelCase = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _a (self ):
'''simple docstring'''
lowerCamelCase = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_inputs(__a )
lowerCamelCase = 25
lowerCamelCase = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 8_19_20
lowerCamelCase = audio[7_72_30:7_72_40]
lowerCamelCase = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
lowerCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1E-2
def _a (self ):
'''simple docstring'''
lowerCamelCase = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
lowerCamelCase = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
lowerCamelCase = audioldm_pipe.to(__a )
audioldm_pipe.set_progress_bar_config(disable=__a )
lowerCamelCase = self.get_inputs(__a )
lowerCamelCase = audioldm_pipe(**__a ).audios[0]
assert audio.ndim == 1
assert len(__a ) == 8_19_20
lowerCamelCase = audio[2_77_80:2_77_90]
lowerCamelCase = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
lowerCamelCase = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3E-2
| 623
| 0
|
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
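# --- Illustrative sketch (not part of the module under test) -----------------
# The tests above exercise `make_duplicate_clusters`, which clusters near-
# duplicate files by MinHash/Jaccard similarity. This hedged sketch shows the
# underlying idea using the third-party `datasketch` package; the 0.85
# threshold mirrors the cutoff passed to `make_duplicate_clusters`, and all
# `_sketch_`-prefixed names are invented for illustration.
from datasketch import MinHash, MinHashLSH


def _sketch_minhash(text, num_perm=128):
    minhash = MinHash(num_perm=num_perm)
    for token in text.split():
        minhash.update(token.encode("utf-8"))
    return minhash


def _sketch_find_duplicates(docs, threshold=0.85):
    # docs: mapping of key -> file content
    lsh = MinHashLSH(threshold=threshold, num_perm=128)
    hashes = {key: _sketch_minhash(text) for key, text in docs.items()}
    for key, minhash in hashes.items():
        lsh.insert(key, minhash)
    # each query returns the keys whose estimated Jaccard similarity >= threshold
    return {key: lsh.query(minhash) for key, minhash in hashes.items()}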
| 131
|
import math
import sys

import cv2
import numpy as np


def vec_gaussian(img: np.ndarray, variance: float) -> np.ndarray:
    # For applying gaussian function for each element in matrix.
    sigma = math.sqrt(variance)
    cons = 1 / (sigma * math.sqrt(2 * math.pi))
    return cons * np.exp(-((img / sigma) ** 2) * 0.5)


def get_slice(img: np.ndarray, x: int, y: int, kernel_size: int) -> np.ndarray:
    half = kernel_size // 2
    return img[x - half : x + half + 1, y - half : y + half + 1]


def get_gauss_kernel(kernel_size: int, spatial_variance: float) -> np.ndarray:
    # Creates a gaussian kernel of given dimension.
    arr = np.zeros((kernel_size, kernel_size))
    for i in range(0, kernel_size):
        for j in range(0, kernel_size):
            arr[i, j] = math.sqrt(
                abs(i - kernel_size // 2) ** 2 + abs(j - kernel_size // 2) ** 2
            )
    return vec_gaussian(arr, spatial_variance)


def bilateral_filter(
    img: np.ndarray,
    spatial_variance: float,
    intensity_variance: float,
    kernel_size: int,
) -> np.ndarray:
    img2 = np.zeros(img.shape)
    gauss_ker = get_gauss_kernel(kernel_size, spatial_variance)
    size_x, size_y = img.shape
    for i in range(kernel_size // 2, size_x - kernel_size // 2):
        for j in range(kernel_size // 2, size_y - kernel_size // 2):
            img_s = get_slice(img, i, j, kernel_size)
            img_i = img_s - img_s[kernel_size // 2, kernel_size // 2]
            img_ig = vec_gaussian(img_i, intensity_variance)
            weights = np.multiply(gauss_ker, img_ig)
            vals = np.multiply(img_s, weights)
            val = np.sum(vals) / np.sum(weights)
            img2[i, j] = val
    return img2


def parse_args(args: list) -> tuple:
    filename = args[1] if args[1:] else "../image_data/lena.jpg"
    spatial_variance = float(args[2]) if args[2:] else 1.0
    intensity_variance = float(args[3]) if args[3:] else 1.0
    if args[4:]:
        kernel_size = int(args[4])
        kernel_size = kernel_size + abs(kernel_size % 2 - 1)
    else:
        kernel_size = 5
    return filename, spatial_variance, intensity_variance, kernel_size


if __name__ == "__main__":
    filename, spatial_variance, intensity_variance, kernel_size = parse_args(sys.argv)
    img = cv2.imread(filename, 0)
    cv2.imshow("input image", img)

    out = img / 255
    out = out.astype("float32")
    out = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
    out = out * 255
    out = np.uint8(out)
    cv2.imshow("output image", out)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
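# For comparison, OpenCV ships a built-in bilateral filter that does the same
# job in optimized C++. A hedged one-liner (parameter names per the cv2 docs:
# `d` is the pixel neighbourhood diameter, sigmaColor/sigmaSpace the
# intensity/spatial falloffs; the input path is a placeholder):
#
#   img_cv = cv2.imread("../image_data/lena.jpg", 0)
#   out_cv = cv2.bilateralFilter(img_cv, d=5, sigmaColor=75, sigmaSpace=75)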
| 131
| 1
|
"""simple docstring"""
import numpy as np
def SCREAMING_SNAKE_CASE__ ( snake_case : np.ndarray , snake_case : np.ndarray , snake_case : float = 1E-1_2 , snake_case : int = 100 , )-> tuple[float, np.ndarray]:
'''simple docstring'''
assert np.shape(snake_case )[0] == np.shape(snake_case )[1]
# Ensure proper dimensionality.
assert np.shape(snake_case )[0] == np.shape(snake_case )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(snake_case ) == np.iscomplexobj(snake_case )
UpperCAmelCase__ : str = np.iscomplexobj(snake_case )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(snake_case , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Any = 0
UpperCAmelCase__ : List[str] = 1E1_2
while not convergence:
# Multiple matrix by the vector.
UpperCAmelCase__ : Optional[Any] = np.dot(snake_case , snake_case )
# Normalize the resulting output vector.
UpperCAmelCase__ : Tuple = w / np.linalg.norm(snake_case )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
UpperCAmelCase__ : Any = vector.conj().T if is_complex else vector.T
UpperCAmelCase__ : List[str] = np.dot(snake_case , np.dot(snake_case , snake_case ) )
# Check convergence.
UpperCAmelCase__ : Optional[Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Tuple = lambda_
if is_complex:
UpperCAmelCase__ : str = np.real(lambda_ )
return lambda_, vector
def SCREAMING_SNAKE_CASE__ ( )-> None:
'''simple docstring'''
UpperCAmelCase__ : List[Any] = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
UpperCAmelCase__ : Dict = np.array([41, 4, 20] )
UpperCAmelCase__ : Union[str, Any] = real_input_matrix.astype(np.complexaaa )
UpperCAmelCase__ : Optional[int] = np.triu(1J * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
UpperCAmelCase__ : int = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
UpperCAmelCase__ : List[str] = real_input_matrix
UpperCAmelCase__ : Any = real_vector
elif problem_type == "complex":
UpperCAmelCase__ : str = complex_input_matrix
UpperCAmelCase__ : List[Any] = complex_vector
# Our implementation.
UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = power_iteration(snake_case , snake_case )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = np.linalg.eigh(snake_case )
# Last eigenvalue is the maximum one.
UpperCAmelCase__ : str = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
UpperCAmelCase__ : Optional[int] = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1E-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(snake_case ) - np.abs(snake_case ) ) <= 1E-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
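# Quick usage sketch (names illustrative): estimate the dominant eigenvalue of
# a random matrix, symmetrised so the Hermitian/symmetry assumption above holds.
#
#   rng = np.random.default_rng(0)
#   a = rng.standard_normal((5, 5))
#   a = (a + a.T) / 2
#   top_eigenvalue, top_eigenvector = power_iteration(a, rng.standard_normal(5))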
| 438
|
"""simple docstring"""
import os
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
'''simple docstring'''
with open(os.path.dirname(snake_case ) + "/p022_names.txt" ) as file:
UpperCAmelCase__ : Tuple = str(file.readlines()[0] )
UpperCAmelCase__ : str = names.replace("\"" , "" ).split("," )
names.sort()
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : List[str] = 0
for i, name in enumerate(snake_case ):
for letter in name:
name_score += ord(snake_case ) - 64
total_score += (i + 1) * name_score
UpperCAmelCase__ : Optional[int] = 0
return total_score
if __name__ == "__main__":
print(solution())
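# Worked example from the problem statement: COLIN has alphabetical value
# 3 + 15 + 12 + 9 + 14 = 53, and sits at position 938 in the sorted list,
# so it contributes 938 * 53 = 49714 to the total score.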
| 438
| 1
|
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
__a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
__a = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
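# Example invocation (all paths are placeholders, not files shipped with this
# script); the checkpoint path should point at the TF1 slim checkpoint prefix:
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_hf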
| 707
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__a = {
"configuration_blip_2": [
"BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Blip2Config",
"Blip2QFormerConfig",
"Blip2VisionConfig",
],
"processing_blip_2": ["Blip2Processor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
"BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Blip2Model",
"Blip2QFormerModel",
"Blip2PreTrainedModel",
"Blip2ForConditionalGeneration",
"Blip2VisionModel",
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
__a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 301
| 0
|
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    """Set all the modules components as attributes of the _PatchedModuleObj object."""

    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module
class patch_submodule:
    """
    Patch a submodule attribute of an object, by keeping all other submodules intact at all levels.
    """

    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []
    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")
    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        """Activate a patch."""
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        """Stop an active patch."""
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
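# Usage sketch (`some_module` and `fake_join` are illustrative, not part of the
# library): patch `os.path.join` as seen from a target module, either as a
# context manager or via the start()/stop() API used by test fixtures.
#
#   def fake_join(*parts):
#       return "::".join(parts)
#
#   with patch_submodule(some_module, "os.path.join", fake_join):
#       assert some_module.os.path.join("a", "b") == "a::b"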
| 114
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = {
'''xlm-mlm-en-2048''': '''https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json''',
'''xlm-mlm-ende-1024''': '''https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-enfr-1024''': '''https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json''',
'''xlm-mlm-enro-1024''': '''https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json''',
'''xlm-mlm-tlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json''',
'''xlm-mlm-xnli15-1024''': '''https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json''',
'''xlm-clm-enfr-1024''': '''https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json''',
'''xlm-clm-ende-1024''': '''https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json''',
'''xlm-mlm-17-1280''': '''https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json''',
'''xlm-mlm-100-1280''': '''https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json''',
}
class XLMConfig(PretrainedConfig):
    model_type = "xlm"
    attribute_map = {
        "hidden_size": "emb_dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
        "n_words": "vocab_size",  # For backward compatibility
    }
    def __init__(self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048**-0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs):
        """Constructs XLMConfig."""
        self.vocab_size = vocab_size
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.use_lang_emb = use_lang_emb
        self.layer_norm_eps = layer_norm_eps
        self.bos_index = bos_index
        self.eos_index = eos_index
        self.pad_index = pad_index
        self.unk_index = unk_index
        self.mask_index = mask_index
        self.is_encoder = is_encoder
        self.max_position_embeddings = max_position_embeddings
        self.embed_init_std = embed_init_std
        self.init_std = init_std
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_proj_to_labels = summary_proj_to_labels
        self.summary_first_dropout = summary_first_dropout
        self.start_n_top = start_n_top
        self.end_n_top = end_n_top
        self.mask_token_id = mask_token_id
        self.lang_id = lang_id

        if "n_words" in kwargs:
            self.n_words = kwargs["n_words"]

        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs)
class XLMOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
] )
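# Instantiating the config works like any other PretrainedConfig; a minimal
# sketch (values here are illustrative, not a tested training recipe):
#
#   config = XLMConfig(vocab_size=30145, emb_dim=1024, n_layers=6, n_heads=8)
#   config.hidden_size  # -> 1024, resolved via the attribute_map alias for emb_dim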
| 154
| 0
|
"""simple docstring"""
from torch import nn
def A__ ( _UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(F"""Unsupported activation function: {act_fn}""" )
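# Usage sketch:
#
#   act = get_activation("gelu")    # -> nn.GELU()
#   y = act(torch.randn(2, 8))
#   get_activation("tanh")          # raises ValueError: unsupported name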
| 150
|
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=test_command)
    return parser


def test_command(args):
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")


def main():
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
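# Typical CLI usage once the subcommand is wired into the `accelerate` entry point:
#
#   accelerate test
#   accelerate test --config_file path/to/default_config.yaml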
| 150
| 1
|
"""simple docstring"""
import os
from collections.abc import Iterator
def _snake_case ( snake_case__ : str = "." ):
for dir_path, dir_names, filenames in os.walk(snake_case__ ):
A = [d for d in dir_names if d != 'scripts' and d[0] not in '._']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(snake_case__ )[1] in (".py", ".ipynb"):
yield os.path.join(snake_case__ , snake_case__ ).lstrip('./' )
def _snake_case ( snake_case__ : str ):
return F'{i * " "}*' if i else "\n##"
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(snake_case__ ) or old_parts[i] != new_part) and new_part:
print(F'{md_prefix(snake_case__ )} {new_part.replace("_" , " " ).title()}' )
return new_path
def _snake_case ( snake_case__ : str = "." ):
A = ''
for filepath in sorted(good_file_paths(snake_case__ ) ):
A , A = os.path.split(snake_case__ )
if filepath != old_path:
A = print_path(snake_case__ , snake_case__ )
A = (filepath.count(os.sep ) + 1) if filepath else 0
A = F'{filepath}/{filename}'.replace(' ' , '%20' )
A = os.path.splitext(filename.replace('_' , ' ' ).title() )[0]
print(F'{md_prefix(snake_case__ )} [{filename}]({url})' )
if __name__ == "__main__":
print_directory_md('''.''')
| 91
|
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
def sort_auto_mapping(fname, overwrite: bool = False):
    """Sort all auto mappings in a file."""
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                    line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True
def sort_all_auto_mappings(overwrite: bool = False):
    """Sort all auto mappings in the library."""
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 430
| 0
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
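# Usage sketch from a function migrating a keyword argument (names illustrative):
#
#   def resize(image, size=None, **kwargs):
#       width = deprecate("width", "0.20.0", "Use `size` instead.", take_from=kwargs)
#       ...
#
# Passing `width=...` pops it from kwargs, emits a FutureWarning, and returns the
# old value so the caller can fall back to it.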
| 73
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_no_config(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())
    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )
    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI passes the right command to `gcloud`.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Tuple = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Union[str, Any] = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=a_ )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[Any] = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : List[str] = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Dict = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : str = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
def _UpperCamelCase ( self ):
lowerCamelCase_ : Any = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=a_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , a_ , )
| 73
| 1
|
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101100111110110010010000011110111011000110011110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()

        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()

        images = [self.encoder.encode(image, "dwtDct") for image in images]

        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)

        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
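# Usage sketch (shapes illustrative): images enter and leave in [-1, 1] NCHW
# float format, matching what the diffusion pipeline call sites pass in.
#
#   watermarker = StableDiffusionXLWatermarker()
#   images = torch.rand(2, 3, 512, 512) * 2 - 1
#   images = watermarker.apply_watermark(images)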
| 116
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__a: Dict = logging.get_logger(__name__)
__a: Optional[int] = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = '''efficientnet'''
def __init__( self : Dict , lowerCamelCase : int = 3 , lowerCamelCase : int = 600 , lowerCamelCase : float = 2.0 , lowerCamelCase : float = 3.1 , lowerCamelCase : int = 8 , lowerCamelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , lowerCamelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , lowerCamelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , lowerCamelCase : List[int] = [] , lowerCamelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , lowerCamelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , lowerCamelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , lowerCamelCase : float = 0.25 , lowerCamelCase : str = "swish" , lowerCamelCase : int = 2560 , lowerCamelCase : str = "mean" , lowerCamelCase : float = 0.02 , lowerCamelCase : float = 0.001 , lowerCamelCase : float = 0.99 , lowerCamelCase : float = 0.5 , lowerCamelCase : float = 0.2 , **lowerCamelCase : List[str] , ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(**lowerCamelCase )
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = width_coefficient
_UpperCAmelCase = depth_coefficient
_UpperCAmelCase = depth_divisor
_UpperCAmelCase = kernel_sizes
_UpperCAmelCase = in_channels
_UpperCAmelCase = out_channels
_UpperCAmelCase = depthwise_padding
_UpperCAmelCase = strides
_UpperCAmelCase = num_block_repeats
_UpperCAmelCase = expand_ratios
_UpperCAmelCase = squeeze_expansion_ratio
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dim
_UpperCAmelCase = pooling_type
_UpperCAmelCase = initializer_range
_UpperCAmelCase = batch_norm_eps
_UpperCAmelCase = batch_norm_momentum
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = drop_connect_rate
_UpperCAmelCase = sum(lowerCamelCase ) * 4
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = version.parse('''1.11''' )
@property
def lowerCamelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def lowerCamelCase ( self : Dict ) -> float:
"""simple docstring"""
return 1E-5
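# In the released transformers API these classes correspond to
# EfficientNetConfig and EfficientNetOnnxConfig; a hedged usage sketch of
# constructing the config (defaults reproduce the EfficientNet-B7 geometry):
#
#   from transformers import EfficientNetConfig
#   config = EfficientNetConfig(image_size=600, width_coefficient=2.0)
#   config.num_hidden_layers   # derived as sum(num_block_repeats) * 4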
| 108
| 0
|
'''simple docstring'''
from __future__ import annotations
a_ : Optional[Any] = list[list[int]]
# assigning initial values to the grid
a_ : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
a_ : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """
    This function checks whether the digit `n` can be placed at (row, column)
    without duplicating it in the row, the column, or the 3x3 subgrid.
    """
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """
    This function finds an empty location so that we can assign a number
    for that particular cell in the grid.
    """
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """
    Takes a partially filled-in grid and attempts to assign values to all
    unassigned locations in such a way as to meet the Sudoku constraints.
    """
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit

            if sudoku(grid) is not None:
                return grid

            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
a_ : Union[str, Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 701
|
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group("nccl")
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F"""{gpu} is OK (global rank: {rank}/{world_size})""")
dist.barrier()
if rank == 0:
printflock(F"""pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}""")
except Exception:
printflock(F"""{gpu} is broken""")
raise
| 673
| 0
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-m", "--pretrained_model_name_or_path", type=str, default=None, required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
    )
    parser.add_argument("-c", "--caption", type=str, default="robotic cat with wings", help="Text used to generate images.")
    parser.add_argument("-n", "--images_num", type=int, default=4, help="How much images to generate.")
    parser.add_argument("-s", "--seed", type=int, default=42, help="Seed for random process.")
    parser.add_argument("-ci", "--cuda_id", type=int, default=0, help="cuda_id.")
    args = parser.parse_args()
    return args


def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")

    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size

    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid


def generate_images(
    pipeline,
    prompt="robotic cat with wings",
    guidance_scale=7.5,
    num_inference_steps=50,
    num_images_per_prompt=1,
    seed=42,
):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images


args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
| 2
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
SCREAMING_SNAKE_CASE_:List[Any] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_:List[Any] = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = "bit"
__lowerCamelCase : Union[str, Any] = ["preactivation", "bottleneck"]
__lowerCamelCase : Union[str, Any] = ["SAME", "VALID"]
def __init__( self, lowerCamelCase__=3, lowerCamelCase__=64, lowerCamelCase__=[256, 512, 1024, 2048], lowerCamelCase__=[3, 4, 6, 3], lowerCamelCase__="preactivation", lowerCamelCase__="relu", lowerCamelCase__=None, lowerCamelCase__=32, lowerCamelCase__=0.0, lowerCamelCase__=False, lowerCamelCase__=32, lowerCamelCase__=1, lowerCamelCase__=None, lowerCamelCase__=None, **lowerCamelCase__, ):
super().__init__(**lowerCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
A : List[Any] = global_padding.upper()
else:
raise ValueError(f'''Padding strategy {global_padding} not supported''' )
A : Dict = num_channels
A : List[Any] = embedding_size
A : Optional[Any] = hidden_sizes
A : str = depths
A : str = layer_type
A : Union[str, Any] = hidden_act
A : Any = global_padding
A : Optional[int] = num_groups
A : Dict = drop_path_rate
A : List[Any] = embedding_dynamic_padding
A : List[Any] = output_stride
A : Union[str, Any] = width_factor
A : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1, len(lowerCamelCase__ ) + 1 )]
A , A : Any = get_aligned_output_features_output_indices(
out_features=lowerCamelCase__, out_indices=lowerCamelCase__, stage_names=self.stage_names )
| 662
| 0
|
'''simple docstring'''
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
unet_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""time_embed.0.weight""", """time_embedding.linear_1.weight"""),
("""time_embed.0.bias""", """time_embedding.linear_1.bias"""),
("""time_embed.2.weight""", """time_embedding.linear_2.weight"""),
("""time_embed.2.bias""", """time_embedding.linear_2.bias"""),
("""input_blocks.0.0.weight""", """conv_in.weight"""),
("""input_blocks.0.0.bias""", """conv_in.bias"""),
("""out.0.weight""", """conv_norm_out.weight"""),
("""out.0.bias""", """conv_norm_out.bias"""),
("""out.2.weight""", """conv_out.weight"""),
("""out.2.bias""", """conv_out.bias"""),
]
unet_conversion_map_resnet = [
# (stable-diffusion, HF Diffusers)
("""in_layers.0""", """norm1"""),
("""in_layers.2""", """conv1"""),
("""out_layers.0""", """norm2"""),
("""out_layers.3""", """conv2"""),
("""emb_layers.1""", """time_emb_proj"""),
("""skip_connection""", """conv_shortcut"""),
]
unet_conversion_map_layer = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
    # loop over downblocks/upblocks
    for j in range(2):
        # loop over resnets/attentions for downblocks
        hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
        sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
        unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

        if i < 3:
            # no attention layers in down_blocks.3
            hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
            sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
            unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

    for j in range(3):
        # loop over resnets/attentions for upblocks
        hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
        sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
        unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

        if i > 0:
            # no attention layers in up_blocks.0
            hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
            unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

    if i < 3:
        # no downsample in down_blocks.3
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
        sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
        unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

        # no upsample in up_blocks.3
        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"output_blocks.{3*i + 2}.{1 if i == 0 else 2}."
        unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
sd_mid_atn_prefix = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))

for j in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{j}."
    sd_mid_res_prefix = f"middle_block.{2*j}."
    unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def convert_unet_state_dict(unet_state_dict):
    # buyer beware: this is a *brittle* function,
    # and correct output requires that all of these pieces interact in
    # the exact order in which I have arranged them.
    mapping = {k: k for k in unet_state_dict.keys()}
    for sd_name, hf_name in unet_conversion_map:
        mapping[hf_name] = sd_name
    for k, v in mapping.items():
        if "resnets" in k:
            for sd_part, hf_part in unet_conversion_map_resnet:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    for k, v in mapping.items():
        for sd_part, hf_part in unet_conversion_map_layer:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    new_state_dict = {v: unet_state_dict[k] for k, v in mapping.items()}
    return new_state_dict
# ================#
# VAE Conversion #
# ================#
vae_conversion_map = [
# (stable-diffusion, HF Diffusers)
("""nin_shortcut""", """conv_shortcut"""),
("""norm_out""", """conv_norm_out"""),
("""mid.attn_1.""", """mid_block.attentions.0."""),
]
for i in range(4):
    # down_blocks have two resnets
    for j in range(2):
        hf_down_prefix = f"encoder.down_blocks.{i}.resnets.{j}."
        sd_down_prefix = f"encoder.down.{i}.block.{j}."
        vae_conversion_map.append((sd_down_prefix, hf_down_prefix))

    if i < 3:
        hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0."
        sd_downsample_prefix = f"down.{i}.downsample."
        vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))

        hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
        sd_upsample_prefix = f"up.{3-i}.upsample."
        vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))

    # up_blocks have three resnets
    # also, up blocks in hf are numbered in reverse from sd
    for j in range(3):
        hf_up_prefix = f"decoder.up_blocks.{i}.resnets.{j}."
        sd_up_prefix = f"decoder.up.{3-i}.block.{j}."
        vae_conversion_map.append((sd_up_prefix, hf_up_prefix))

# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
    hf_mid_res_prefix = f"mid_block.resnets.{i}."
    sd_mid_res_prefix = f"mid.block_{i+1}."
    vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
vae_conversion_map_attn = [
# (stable-diffusion, HF Diffusers)
("""norm.""", """group_norm."""),
("""q.""", """query."""),
("""k.""", """key."""),
("""v.""", """value."""),
("""proj_out.""", """proj_attn."""),
]
def reshape_weight_for_sd(w):
    # convert HF linear weights to SD conv2d weights
    return w.reshape(*w.shape, 1, 1)


def convert_vae_state_dict(vae_state_dict):
    mapping = {k: k for k in vae_state_dict.keys()}
    for k, v in mapping.items():
        for sd_part, hf_part in vae_conversion_map:
            v = v.replace(hf_part, sd_part)
        mapping[k] = v
    for k, v in mapping.items():
        if "attentions" in k:
            for sd_part, hf_part in vae_conversion_map_attn:
                v = v.replace(hf_part, sd_part)
            mapping[k] = v
    new_state_dict = {v: vae_state_dict[k] for k, v in mapping.items()}
    weights_to_convert = ["q", "k", "v", "proj_out"]
    for k, v in new_state_dict.items():
        for weight_name in weights_to_convert:
            if f"mid.attn_1.{weight_name}.weight" in k:
                print(f"Reshaping {k} for SD format")
                new_state_dict[k] = reshape_weight_for_sd(v)
    return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
textenc_conversion_lst = [
    # (stable-diffusion, HF Diffusers)
    ("resblocks.", "text_model.encoder.layers."),
    ("ln_1", "layer_norm1"),
    ("ln_2", "layer_norm2"),
    (".c_fc.", ".fc1."),
    (".c_proj.", ".fc2."),
    (".attn", ".self_attn"),
    ("ln_final.", "transformer.text_model.final_layer_norm."),
    ("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
    ("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
protected = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
textenc_pattern = re.compile("|".join(protected.keys()))

# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
code2idx = {"q": 0, "k": 1, "v": 2}
def convert_text_enc_state_dict_v20(text_enc_dict):
    new_state_dict = {}
    capture_qkv_weight = {}
    capture_qkv_bias = {}
    for k, v in text_enc_dict.items():
        if (
            k.endswith(".self_attn.q_proj.weight")
            or k.endswith(".self_attn.k_proj.weight")
            or k.endswith(".self_attn.v_proj.weight")
        ):
            k_pre = k[: -len(".q_proj.weight")]
            k_code = k[-len("q_proj.weight")]
            if k_pre not in capture_qkv_weight:
                capture_qkv_weight[k_pre] = [None, None, None]
            capture_qkv_weight[k_pre][code2idx[k_code]] = v
            continue

        if (
            k.endswith(".self_attn.q_proj.bias")
            or k.endswith(".self_attn.k_proj.bias")
            or k.endswith(".self_attn.v_proj.bias")
        ):
            k_pre = k[: -len(".q_proj.bias")]
            k_code = k[-len("q_proj.bias")]
            if k_pre not in capture_qkv_bias:
                capture_qkv_bias[k_pre] = [None, None, None]
            capture_qkv_bias[k_pre][code2idx[k_code]] = v
            continue

        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k)
        new_state_dict[relabelled_key] = v

    for k_pre, tensors in capture_qkv_weight.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_weight"] = torch.cat(tensors)

    for k_pre, tensors in capture_qkv_bias.items():
        if None in tensors:
            raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing")
        relabelled_key = textenc_pattern.sub(lambda m: protected[re.escape(m.group(0))], k_pre)
        new_state_dict[relabelled_key + ".in_proj_bias"] = torch.cat(tensors)

    return new_state_dict
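# Hedged sketch of the merge above: the three separate Diffusers projections
# "...self_attn.q_proj/k_proj/v_proj.weight" for one layer are concatenated, in the
# q, k, v order given by code2idx, into a single OpenCLIP-style "...attn.in_proj_weight".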
def convert_text_enc_state_dict(text_enc_dict):
    # v1 text encoder keys already match the SD layout, so pass them through unchanged
    return text_enc_dict
if __name__ == "__main__":
lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument("""--half""", action="""store_true""", help="""Save weights in half precision.""")
parser.add_argument(
"""--use_safetensors""", action="""store_true""", help="""Save weights use safetensors, default is ckpt."""
)
lowerCAmelCase = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowerCAmelCase = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.safetensors""")
lowerCAmelCase = osp.join(args.model_path, """text_encoder""", """model.safetensors""")
# Load models from safetensors if it exists, if it doesn't pytorch
if osp.exists(unet_path):
lowerCAmelCase = load_file(unet_path, device="""cpu""")
else:
lowerCAmelCase = osp.join(args.model_path, """unet""", """diffusion_pytorch_model.bin""")
lowerCAmelCase = torch.load(unet_path, map_location="""cpu""")
if osp.exists(vae_path):
lowerCAmelCase = load_file(vae_path, device="""cpu""")
else:
lowerCAmelCase = osp.join(args.model_path, """vae""", """diffusion_pytorch_model.bin""")
lowerCAmelCase = torch.load(vae_path, map_location="""cpu""")
if osp.exists(text_enc_path):
lowerCAmelCase = load_file(text_enc_path, device="""cpu""")
else:
lowerCAmelCase = osp.join(args.model_path, """text_encoder""", """pytorch_model.bin""")
lowerCAmelCase = torch.load(text_enc_path, map_location="""cpu""")
# Convert the UNet model
lowerCAmelCase = convert_unet_state_dict(unet_state_dict)
lowerCAmelCase = {"""model.diffusion_model.""" + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowerCAmelCase = convert_vae_state_dict(vae_state_dict)
lowerCAmelCase = {"""first_stage_model.""" + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowerCAmelCase = """text_model.encoder.layers.22.layer_norm2.bias""" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowerCAmelCase = {"""transformer.""" + k: v for k, v in text_enc_dict.items()}
lowerCAmelCase = convert_text_enc_state_dict_vaa(text_enc_dict)
lowerCAmelCase = {"""cond_stage_model.model.""" + k: v for k, v in text_enc_dict.items()}
else:
lowerCAmelCase = convert_text_enc_state_dict(text_enc_dict)
lowerCAmelCase = {"""cond_stage_model.transformer.""" + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowerCAmelCase = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowerCAmelCase = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowerCAmelCase = {"""state_dict""": state_dict}
torch.save(state_dict, args.checkpoint_path)
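# Hedged usage sketch (the script filename below is assumed, not stated above):
#   python convert_diffusers_to_original_stable_diffusion.py \
#       --model_path ./my-diffusers-model --checkpoint_path ./model.safetensors --use_safetensors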
| 551
|
'''simple docstring'''
import numpy as np
def tangent_hyperbolic(vector: np.ndarray) -> np.ndarray:
    # hyperbolic tangent written as 2 * sigmoid(2x) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1
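# illustrative check (hedged): the closed form above matches numpy's built-in tanh
# >>> np.allclose(tangent_hyperbolic(np.array([-1.0, 0.0, 1.0])), np.tanh(np.array([-1.0, 0.0, 1.0])))
# True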
if __name__ == "__main__":
import doctest
doctest.testmod()
| 551
| 1
|
'''simple docstring'''
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()
def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v
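# minimal non-interactive example (hedged sketch, using the function above):
#   INF = float("inf")
#   dist, _ = floyd_warshall([[0, 3, INF], [INF, 0, 1], [2, INF, 0]], 3)
#   # dist[0][2] == 4, found via the intermediate vertex 1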
if __name__ == "__main__":
__lowercase : Any = int(input('Enter number of vertices: '))
__lowercase : Optional[Any] = int(input('Enter number of edges: '))
__lowercase : str = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
__lowercase : Optional[Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
__lowercase : Union[str, Any] = int(input('Enter source:'))
__lowercase : List[Any] = int(input('Enter destination:'))
__lowercase : List[Any] = float(input('Enter weight:'))
__lowercase : List[str] = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 476
|
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
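# illustrative usage (hedged; the requirement strings are examples, not from this module):
#   require_version("python>=3.8")
#   require_version_core("torch>=1.10,<3.0")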
| 95
| 0
|
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1
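# Note (hedged): pigeonhole sort runs in O(n + range) time with O(range) extra space,
# so it only pays off when max(a) - min(a) is small relative to len(a).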
def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join([str(x) for x in a]))


if __name__ == "__main__":
    main()
| 711
|
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store profit gained for the 1kg in case of each weight
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list and sorting profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop till the total weight do not reach max limit e.g. 15 kg and till i < length
    while limit <= max_weight and i < length:
        # flag value for encountered greatest element in sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1

        # check if the weight encountered is less than the total weight
        # encountered before.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding profit gained for the given weight 1 ===
            # weight[index]/weight[index]
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than limit, therefore take the
            # required number of remaining kgs and calculate profit for it.
            # weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
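# worked example (hedged): calc_profit([1, 2, 3], [3, 4, 5], 15) packs all three items
# (total weight 12 <= 15) and returns a gain of 1 + 2 + 3 = 6.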
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
snake_case__ : str = [int(x) for x in input("""Input profits separated by spaces: """).split()]
snake_case__ : str = [int(x) for x in input("""Input weights separated by spaces: """).split()]
snake_case__ : Optional[Any] = int(input("""Max weight allowed: """))
# Function Call
calc_profit(profit, weight, max_weight)
| 655
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
        "MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MraForMaskedLM",
        "MraForMultipleChoice",
        "MraForQuestionAnswering",
        "MraForSequenceClassification",
        "MraForTokenClassification",
        "MraLayer",
        "MraModel",
        "MraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
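# Note (hedged): the _LazyModule pattern above defers the torch-dependent imports until an
# attribute is first accessed, so importing the package stays cheap when MRA is unused.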
| 141
|
'''simple docstring'''
import warnings
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
class ByT5Tokenizer(PreTrainedTokenizer):

    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=125,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to ByT5Tokenizer. In this case the additional_special_tokens must include the"
                    " extra_ids tokens"
                )

        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self._extra_ids = extra_ids
        self._utf_vocab_size = 2**8  # utf is 8 bits

        # define special tokens dict
        self.special_tokens_encoder: Dict[str, int] = {
            self.pad_token: 0,
            self.eos_token: 1,
            self.unk_token: 2,
        }
        self._num_special_tokens = len(self.special_tokens_encoder)
        n = len(additional_special_tokens)
        for i, token in enumerate(additional_special_tokens):
            self.special_tokens_encoder[token] = self.vocab_size + i - n
        self.special_tokens_decoder: Dict[int, str] = {v: k for k, v in self.special_tokens_encoder.items()}
    @property
    def vocab_size(self):
        return self._utf_vocab_size + self._num_special_tokens + self._extra_ids

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def _tokenize(self, text: str) -> List[str]:
        """Take as input a string and return a list of strings (tokens) for words/sub-words."""
        tokens = [chr(i) for i in text.encode("utf-8")]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        if token in self.special_tokens_encoder:
            token_id = self.special_tokens_encoder[token]
        elif token in self.added_tokens_encoder:
            token_id = self.added_tokens_encoder[token]
        elif len(token) != 1:
            token_id = self.unk_token_id
        else:
            token_id = ord(token) + self._num_special_tokens
        return token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        if index in self.special_tokens_decoder:
            token = self.special_tokens_decoder[index]
        else:
            token = chr(index - self._num_special_tokens)
        return token

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        bstring = b""
        for token in tokens:
            if token in self.special_tokens_decoder:
                tok_string = self.special_tokens_decoder[token].encode("utf-8")
            elif token in self.added_tokens_decoder:
                tok_string = self.added_tokens_decoder[token].encode("utf-8")
            elif token in self.special_tokens_encoder:
                tok_string = token.encode("utf-8")
            elif token in self.added_tokens_encoder:
                tok_string = token.encode("utf-8")
            else:
                tok_string = bytes([ord(token)])
            bstring += tok_string
        string = bstring.decode("utf-8", errors="ignore")
        return string

    # ByT5Tokenizer has no vocab file
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        return ()
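# Hedged usage sketch: each UTF-8 byte maps to ord(byte) + 3 (pad/eos/unk occupy ids 0-2),
# so _tokenize("hi") yields ["h", "i"], _convert_token_to_id gives ids [107, 108], and
# build_inputs_with_special_tokens appends the eos id 1.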
| 448
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class BeitConfig(PretrainedConfig):
    model_type = "beit"

    def __init__(
        self,
        vocab_size=8192,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class BeitOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
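# Hedged usage sketch: the defaults above are intended to mirror the
# microsoft/beit-base-patch16-224-pt22k checkpoint; individual fields can be overridden,
# e.g. BeitConfig(image_size=384, drop_path_rate=0.2).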
| 560
|
"""simple docstring"""
class FlowNetwork:
    def __init__(self, graph, sources, sinks):
        self.source_index = None
        self.sink_index = None
        self.graph = graph

        self._normalize_graph(sources, sinks)
        self.vertices_count = len(graph)
        self.maximum_flow_algorithm = None
    # make only one source and one sink
    def _normalize_graph(self, sources, sinks):
        if sources is int:
            sources = [sources]
        if sinks is int:
            sinks = [sinks]

        if len(sources) == 0 or len(sinks) == 0:
            return

        self.source_index = sources[0]
        self.sink_index = sinks[0]

        # make fake vertex if there are more
        # than one source or sink
        if len(sources) > 1 or len(sinks) > 1:
            max_input_flow = 0
            for i in sources:
                max_input_flow += sum(self.graph[i])

            size = len(self.graph) + 1
            for room in self.graph:
                room.insert(0, 0)
            self.graph.insert(0, [0] * size)
            for i in sources:
                self.graph[0][i + 1] = max_input_flow
            self.source_index = 0

            size = len(self.graph) + 1
            for room in self.graph:
                room.append(0)
            self.graph.append([0] * size)
            for i in sinks:
                self.graph[i + 1][size - 1] = max_input_flow
            self.sink_index = size - 1
    def find_maximum_flow(self):
        if self.maximum_flow_algorithm is None:
            raise Exception("You need to set maximum flow algorithm before.")
        if self.source_index is None or self.sink_index is None:
            return 0

        self.maximum_flow_algorithm.execute()
        return self.maximum_flow_algorithm.get_maximum_flow()

    def set_maximum_flow_algorithm(self, algorithm):
        self.maximum_flow_algorithm = algorithm(self)
class FlowNetworkAlgorithmExecutor:
    def __init__(self, flow_network):
        self.flow_network = flow_network
        self.verticies_count = flow_network.vertices_count
        self.source_index = flow_network.source_index
        self.sink_index = flow_network.sink_index
        # it's just a reference, so you shouldn't change
        # it in your algorithms, use deep copy before doing that
        self.graph = flow_network.graph
        self.executed = False

    def execute(self):
        if not self.executed:
            self._algorithm()
            self.executed = True

    # subclasses override this with the actual algorithm
    def _algorithm(self):
        pass
class MaximumFlowAlgorithmExecutor(FlowNetworkAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)
        # use this to save your result
        self.maximum_flow = -1

    def get_maximum_flow(self):
        if not self.executed:
            raise Exception("You should execute algorithm before using its result!")
        return self.maximum_flow
class PushRelabelExecutor(MaximumFlowAlgorithmExecutor):
    def __init__(self, flow_network):
        super().__init__(flow_network)

        self.preflow = [[0] * self.verticies_count for i in range(self.verticies_count)]

        self.heights = [0] * self.verticies_count
        self.excesses = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count

        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth

        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]

        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1

        self.maximum_flow = sum(self.preflow[self.source_index])
    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's neighbour and current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)

            self.relabel(vertex_index)
    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta
    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]

        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]

    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()

    print(f"maximum flow is {maximum_flow}")
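    # Sanity note (hedged): for the graph above the only augmenting path from source 0 to
    # sink 3 is 0 -> 1 -> 2 -> 3, so the printed maximum flow should be min(7, 6, 8) = 6.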
| 560
| 1
|
'''simple docstring'''
import random
def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[i], a[j] = a[j], a[i]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[left], a[pivot] = (
            a[pivot],
            a[left],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]

    quick_sort_random(arr, 0, len(arr))

    print(arr)


if __name__ == "__main__":
    main()
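# Note (hedged): the uniformly random pivot gives quicksort an expected O(n log n) running
# time on any input, so already-sorted data is no longer a deterministic worst case.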
| 384
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    def __init__(
        self,
        config: ResNetConfig,
        in_channels: int,
        out_channels: int,
        stride: int = 2,
        depth: int = 2,
    ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)

            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
UpperCamelCase_ = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
UpperCamelCase_ = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
"""The bare ResNet model outputting raw features without any specific head on top.""" , _a , )
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]

        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
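# Hedged usage sketch (the input tensor below is made up):
#   config = ResNetConfig()
#   model = ResNetModel(config)
#   out = model(torch.randn(1, config.num_channels, 224, 224))
#   out.pooler_output  # shape (1, config.hidden_sizes[-1], 1, 1)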
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 384
| 1
|
def catalan(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
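# worked example (hedged): the iteration yields the Catalan numbers 1, 1, 2, 5, 14, ...
# so catalan(5) returns 14 (the 5th value of the 1-indexed sequence).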
if __name__ == "__main__":
import doctest
doctest.testmod()
| 456
|
def solution() -> int:
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
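# Note (hedged): this is Project Euler problem 40 (Champernowne's constant); the product
# d_1 * d_10 * d_100 * ... * d_1000000 evaluates to 210.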
if __name__ == "__main__":
print(solution())
| 456
| 1
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class LanguageModeling(TaskTemplate):
    task: str = field(default="language-modeling", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({})
    text_column: str = "text"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text"}
| 32
|
# fmt: off
MORSE_CODE_DICT = {
"A": ".-", "B": "-...", "C": "-.-.", "D": "-..", "E": ".", "F": "..-.", "G": "--.",
"H": "....", "I": "..", "J": ".---", "K": "-.-", "L": ".-..", "M": "--", "N": "-.",
"O": "---", "P": ".--.", "Q": "--.-", "R": ".-.", "S": "...", "T": "-", "U": "..-",
"V": "...-", "W": ".--", "X": "-..-", "Y": "-.--", "Z": "--..", "1": ".----",
"2": "..---", "3": "...--", "4": "....-", "5": ".....", "6": "-....", "7": "--...",
"8": "---..", "9": "----.", "0": "-----", "&": ".-...", "@": ".--.-.",
":": "---...", ",": "--..--", ".": ".-.-.-", "'": ".----.", "\"": ".-..-.",
"?": "..--..", "/": "-..-.", "=": "-...-", "+": ".-.-.", "-": "-....-",
"(": "-.--.", ")": "-.--.-", "!": "-.-.--", " ": "/"
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}
def encrypt(message: str) -> str:
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
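# illustrative round trip (hedged): encrypt("SOS") -> "... --- ..." and
# decrypt("... --- ...") -> "SOS".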
| 32
| 1
|
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    # avoid stretched display of graph
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase : Union[str, Any] = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors)
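# Sanity note (hedged): rotate(numpy.array([1, 0]), 90) is approximately [0, 1], and each
# iteration_step turns every segment into 4, so 5 iterations of the initial triangle
# produce 3 * 4**5 = 3072 segments.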
| 407
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def linear_regression_prediction(
    train_dt: list, train_usr: list, train_mtch: list, test_dt: list, test_mtch: list
) -> float:
    x = np.array([[1, item, train_mtch[i]] for i, item in enumerate(train_dt)])
    y = np.array(train_usr)
    beta = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose(), x)), x.transpose()), y)
    return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2])
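# Note (hedged): beta above is the ordinary least squares solution of the normal
# equations, beta = (X^T X)^{-1} X^T y, where each row of X is [1, date, match_count].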
def sarimax_predictor(train_user: list, train_match: list, test_match: list) -> float:
    order = (1, 2, 1)
    seasonal_order = (1, 1, 0, 7)
    model = SARIMAX(
        train_user, exog=train_match, order=order, seasonal_order=seasonal_order
    )
    model_fit = model.fit(disp=False, maxiter=600, method="nm")
    result = model_fit.predict(1, len(test_match), exog=[test_match])
    return result[0]
def support_vector_regressor(x_train: list, x_test: list, train_user: list) -> float:
    regressor = SVR(kernel="rbf", C=1, gamma=0.1, epsilon=0.1)
    regressor.fit(x_train, train_user)
    y_pred = regressor.predict(x_test)
    return y_pred[0]
def interquartile_range_checker(train_user: list) -> float:
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    low_lim = q1 - (iqr * 0.1)
    return low_lim
def data_safety_checker(list_vote: list, actual_result: float) -> bool:
    safe = 0
    not_safe = 0
    for i in list_vote:
        if i > actual_result:
            not_safe = not_safe + 1
        else:
            if abs(abs(i) - abs(actual_result)) <= 0.1:
                safe += 1
            else:
                not_safe += 1
    return safe > not_safe
if __name__ == "__main__":
    # data_input_df = pd.read_csv("ex_data.csv", header=None)
    data_input = [[18231, 0.0, 1], [22621, 1.0, 2], [15675, 0.0, 3], [23583, 1.0, 4]]
    data_input_df = pd.DataFrame(
        data_input, columns=["total_user", "total_even", "days"]
    )

    # start normalization
    normalize_df = Normalizer().fit_transform(data_input_df.values)
    # split data
    total_date = normalize_df[:, 2].tolist()
    total_user = normalize_df[:, 0].tolist()
    total_match = normalize_df[:, 1].tolist()

    # for svr (input variable = total date and total match)
    x = normalize_df[:, [1, 2]].tolist()
    x_train = x[: len(x) - 1]
    x_test = x[len(x) - 1 :]

    # for linear regression & sarimax
    trn_date = total_date[: len(total_date) - 1]
    trn_user = total_user[: len(total_user) - 1]
    trn_match = total_match[: len(total_match) - 1]

    tst_date = total_date[len(total_date) - 1 :]
    tst_user = total_user[len(total_user) - 1 :]
    tst_match = total_match[len(total_match) - 1 :]

    # voting system with forecasting
    res_vote = [
        linear_regression_prediction(
            trn_date, trn_user, trn_match, tst_date, tst_match
        ),
        sarimax_predictor(trn_user, trn_match, tst_match),
        support_vector_regressor(x_train, x_test, trn_user),
    ]

    # check the safety of today's data
    not_str = "" if data_safety_checker(res_vote, tst_user[0]) else "not "
    print(f"Today's data is {not_str}safe.")
| 407
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/mbart-large-50-one-to-many-mmt": (
            "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-50-one-to-many-mmt": 1024,
}

# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"]
class MBart50Tokenizer(PreTrainedTokenizer):

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]

    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(
        self,
        vocab_file,
        src_lang=None,
        tgt_lang=None,
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            src_lang=src_lang,
            tgt_lang=tgt_lang,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset

        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        """Converts a sequence of tokens (strings of sub-words) into a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(
        self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs
    ):
        """Used by the translation pipeline to prepare inputs for the generate function."""
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        src_lang: str = "en_XX",
        tgt_texts: Optional[List[str]] = None,
        tgt_lang: str = "ro_RO",
        **kwargs,
    ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source language: prefix=[src_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language: prefix=[tgt_lang_code], suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
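# Usage sketch (not part of the original file; assumes network access to the Hugging Face
# Hub for the pretrained checkpoint): the source language code is prepended and </s>
# appended to every encoded sequence.
#
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
#   )
#   ids = tokenizer("Hello world")["input_ids"]
#   print(tokenizer.convert_ids_to_tokens(ids))  # ['en_XX', '▁Hello', '▁world', '</s>']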
| 562
|
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    """Decorator that makes a function return its own wall-clock runtime in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate `num_examples` rows of random dummy data matching the given feature spec."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write dummy examples to an Arrow file and load them back as a Dataset."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
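# Usage sketch (an assumption, not in the original file): write a 10-row dataset with
# one string column and one int column to a temporary Arrow file and load it back.
if __name__ == "__main__":
    import os
    import tempfile

    features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        dataset = generate_example_dataset(os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=10)
        print(dataset)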
| 561
| 0
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
_lowerCAmelCase : List[Any] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Any = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : str = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[str] = """sgugger/tiny-distilbert-classification"""
_lowerCAmelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , only_pretrain_model=_UpperCAmelCase , )
_lowerCAmelCase : List[Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
'''simple docstring'''
_lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , torchscript=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , fpaa=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
'''simple docstring'''
_lowerCAmelCase : List[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : int = AutoConfig.from_pretrained(_UpperCAmelCase )
# set architectures equal to `None`
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Tuple = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : int = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Any = """sshleifer/tinier_bart"""
_lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Tuple = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Optional[Any] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Any = """sshleifer/tinier_bart"""
_lowerCAmelCase : Dict = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Optional[Any] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
'''simple docstring'''
_lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_UpperCAmelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(_UpperCAmelCase , """env.csv""" ) , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[Any] = PyTorchBenchmark(_UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """env.csv""" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_UpperCAmelCase : int ):
self.assertTrue(hasattr(_UpperCAmelCase , """sequential""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """cumulative""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """current""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , """log.txt""" ) , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : str = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """log.txt""" ) ).exists() )
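# Standalone sketch (an assumption, not part of the test file): the same benchmark the
# tests above exercise, invoked directly. The boolean flags are illustrative values --
# in the tests they are hidden behind the obfuscated `_UpperCAmelCase` placeholders.
#
#   args = PyTorchBenchmarkArguments(
#       models=["sshleifer/tiny-gpt2"], training=False, inference=True,
#       sequence_lengths=[8], batch_sizes=[1], multi_process=False,
#   )
#   results = PyTorchBenchmark(args).run()
#   print(results.time_inference_result)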
| 196
|
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case (unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
for model_result in results.values():
for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""] ):
_lowerCAmelCase : List[Any] = model_result["""result"""][batch_size][sequence_length]
self.assertIsNotNone(_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : Any = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : str = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[str] = """sgugger/tiny-distilbert-classification"""
_lowerCAmelCase : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , only_pretrain_model=_UpperCAmelCase , )
_lowerCAmelCase : List[Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
'''simple docstring'''
_lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , torchscript=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Optional[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , fpaa=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : str ) -> int:
'''simple docstring'''
_lowerCAmelCase : List[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : int = AutoConfig.from_pretrained(_UpperCAmelCase )
# set architectures equal to `None`
_lowerCAmelCase : Optional[int] = None
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Tuple = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : List[str] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : int = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Union[str, Any] = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[str] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Any = """sshleifer/tinier_bart"""
_lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Dict = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Tuple = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : Any = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = """sshleifer/tiny-gpt2"""
_lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Optional[Any] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : Any = """sshleifer/tinier_bart"""
_lowerCAmelCase : Dict = AutoConfig.from_pretrained(_UpperCAmelCase )
_lowerCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : Optional[Any] = PyTorchBenchmark(_UpperCAmelCase , configs=[config] )
_lowerCAmelCase : int = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def SCREAMING_SNAKE_CASE ( self : int ) -> int:
'''simple docstring'''
_lowerCAmelCase : Tuple = """sshleifer/tiny-gpt2"""
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , save_to_csv=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_UpperCAmelCase , """inf_time.csv""" ) , train_memory_csv_file=os.path.join(_UpperCAmelCase , """train_mem.csv""" ) , inference_memory_csv_file=os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) , train_time_csv_file=os.path.join(_UpperCAmelCase , """train_time.csv""" ) , env_info_csv_file=os.path.join(_UpperCAmelCase , """env.csv""" ) , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : List[Any] = PyTorchBenchmark(_UpperCAmelCase )
benchmark.run()
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_time.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """inf_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """train_mem.csv""" ) ).exists() )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """env.csv""" ) ).exists() )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : str = """sshleifer/tiny-gpt2"""
def _check_summary_is_not_empty(_UpperCAmelCase : int ):
self.assertTrue(hasattr(_UpperCAmelCase , """sequential""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """cumulative""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """current""" ) )
self.assertTrue(hasattr(_UpperCAmelCase , """total""" ) )
with tempfile.TemporaryDirectory() as tmp_dir:
_lowerCAmelCase : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_UpperCAmelCase , inference=_UpperCAmelCase , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_UpperCAmelCase , """log.txt""" ) , log_print=_UpperCAmelCase , trace_memory_line_by_line=_UpperCAmelCase , multi_process=_UpperCAmelCase , )
_lowerCAmelCase : str = PyTorchBenchmark(_UpperCAmelCase )
_lowerCAmelCase : Any = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_UpperCAmelCase , """log.txt""" ) ).exists() )
| 196
| 1
|
"""simple docstring"""
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a parsed Version) against a requirement string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Check the installed torch version against a requirement."""
    return compare_versions(torch_version, operation, version)
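# Usage sketch (not in the original file): guard version-specific code paths.
#
#   if is_torch_version(">=", "1.12"):
#       ...  # use features introduced in torch 1.12
#   if compare_versions("numpy", "<", "2.0"):
#       ...  # pre-numpy-2.0 behavior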
| 602
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roberta_fast"] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
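# Illustration (an assumption, not in the original file): with the lazy module in place,
# importing the package stays cheap; backend-specific submodules are only imported on
# first attribute access.
#
#   from transformers import RobertaConfig   # loads configuration_roberta only
#   from transformers import RobertaModel    # first access triggers the torch-backed module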
| 602
| 1
|
import math
import qiskit
def quantum_full_adder(input_1: int = 1, input_2: int = 1, carry_in: int = 1) -> qiskit.result.counts.Counts:
    if isinstance(input_1, str) or isinstance(input_2, str) or isinstance(carry_in, str):
        raise TypeError('inputs must be integers.')

    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError('inputs must be positive.')

    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError('inputs must be exact integers.')

    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError('inputs must be less or equal to 2.')

    # build registers
    qr = qiskit.QuantumRegister(4, 'qr')
    cr = qiskit.ClassicalRegister(2, 'cr')
    # list the entries
    entry = [input_1, input_2, carry_in]

    quantum_circuit = qiskit.QuantumCircuit(qr, cr)

    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries

    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits

    backend = qiskit.Aer.get_backend('aer_simulator')
    job = qiskit.execute(quantum_circuit, backend, shots=1000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
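# Classical cross-check (an assumption, not in the original file): for definite inputs
# (0 or 1, i.e. no Hadamard superposition) the dominant measured bitstring should encode
# (carry_out, sum) of an ordinary full adder.
def classical_full_adder(input_1: int, input_2: int, carry_in: int) -> tuple[int, int]:
    total = input_1 + input_2 + carry_in
    return total // 2, total % 2  # (carry_out, sum)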
| 607
|
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version, operation, requirement_version):
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'''`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}''')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation, version):
    return compare_versions(torch_version, operation, version)
| 607
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swiftformer"] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 378
|
'''simple docstring'''
from collections.abc import Generator
from math import sin
def to_little_endian(string_32: bytes) -> bytes:
    '''Reorder a 32-char bit string so the least significant byte comes first.'''
    if len(string_32) != 32:
        raise ValueError('''Input must be of length 32''')

    little_endian = b''
    for i in [3, 2, 1, 0]:
        little_endian += string_32[8 * i : 8 * i + 8]
    return little_endian
def reformat_hex(i: int) -> bytes:
    '''Convert a 32-bit integer to its little-endian hex representation as bytes.'''
    if i < 0:
        raise ValueError('''Input must be non-negative''')

    hex_rep = format(i, '''08x''')[-8:]
    little_endian_hex = b''
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('''utf-8''')
    return little_endian_hex
def preprocess(message: bytes) -> bytes:
    '''Convert the message to a bit string padded to a multiple of 512 bits, length appended.'''
    bit_string = b''
    for char in message:
        bit_string += format(char, '''08b''').encode('''utf-8''')
    start_len = format(len(bit_string), '''064b''').encode('''utf-8''')

    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])

    return bit_string
def get_block_words(bit_string: bytes):
    '''Yield the 16 little-endian 32-bit words of each 512-bit block.'''
    if len(bit_string) % 512 != 0:
        raise ValueError('''Input must have length that\'s a multiple of 512''')

    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words
def not_32(i: int) -> int:
    '''Bitwise NOT on a 32-bit unsigned integer.'''
    if i < 0:
        raise ValueError('''Input must be non-negative''')

    i_str = format(i, '''032b''')
    new_str = ''''''
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_32(a: int, b: int) -> int:
    '''Addition modulo 2**32.'''
    return (a + b) % 2**32


def left_rotate_32(i: int, shift: int) -> int:
    '''Left-rotate a 32-bit unsigned integer by `shift` bits.'''
    if i < 0:
        raise ValueError('''Input must be non-negative''')
    if shift < 0:
        raise ValueError('''Shift must be non-negative''')
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''Return the MD5 hex digest of `message` as bytes.'''
    bit_string = preprocess(message)

    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]

    # Starting states
    a0 = 0x67452301
    b0 = 0xEFCDAB89
    c0 = 0x98BADCFE
    d0 = 0x10325476

    shift_amounts = [
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
7,
12,
17,
22,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
5,
9,
14,
20,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
4,
11,
16,
23,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
6,
10,
15,
21,
]
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = a0
        b = b0
        c = c0
        d = d0

        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d)  # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c)  # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_32(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_32(b, left_rotate_32(f, shift_amounts[i]))

        # Add hashed chunk to running total
        a0 = sum_32(a0, a)
        b0 = sum_32(b0, b)
        c0 = sum_32(c0, c)
        d0 = sum_32(d0, d)

    digest = reformat_hex(a0) + reformat_hex(b0) + reformat_hex(c0) + reformat_hex(d0)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
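# Verification sketch (an assumption, not in the original file): the pure-Python digest
# should agree with hashlib; note that md5_me returns the hex digest as bytes.
def _check_against_hashlib() -> None:
    import hashlib

    for sample in (b"", b"The quick brown fox jumps over the lazy dog"):
        assert md5_me(sample) == hashlib.md5(sample).hexdigest().encode("utf-8")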
| 22
| 0
|
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(F"Unsupported activation function: {act_fn}")
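# Usage sketch (an assumption, not in the original file):
#
#   import torch
#   act = get_activation("gelu")
#   y = act(torch.randn(2, 4))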
| 704
|
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""original_image"""})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("""mps"""):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)
        inputs = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""original_image""": original_image,
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
    @unittest.skipIf(
        torch_device != """cuda""" or not is_xformers_available(), reason="""XFormers attention is only available with CUDA and `xformers` installed""", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != """cuda""", reason="""float16 requires CUDA""")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1E-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1E-2)
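# Running one of the cases above (a sketch; the file path is an assumption that depends
# on where this test module lives in the repository):
#
#   python -m pytest tests/pipelines/deepfloyd_if/test_if_img2img_superresolution.py -k save_load_local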
| 634
| 0
|
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
BertTokenizer,
ViltConfig,
ViltForImageAndTextRetrieval,
ViltForImagesAndTextClassification,
ViltForMaskedLM,
ViltForQuestionAnswering,
ViltImageProcessor,
ViltProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] , UpperCamelCase_ : str=False , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : List[str]=False ):
'''simple docstring'''
_lowerCAmelCase : List[str] = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"transformer.blocks.{i}.norm1.weight", F"vilt.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm1.bias", F"vilt.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.weight", F"vilt.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"transformer.blocks.{i}.attn.proj.bias", F"vilt.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.norm2.weight", F"vilt.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"transformer.blocks.{i}.norm2.bias", F"vilt.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"transformer.blocks.{i}.mlp.fc1.weight", F"vilt.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc1.bias", F"vilt.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.weight", F"vilt.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"transformer.blocks.{i}.mlp.fc2.bias", F"vilt.encoder.layer.{i}.output.dense.bias") )
# embeddings
rename_keys.extend(
[
# text embeddings
("""text_embeddings.word_embeddings.weight""", """vilt.embeddings.text_embeddings.word_embeddings.weight"""),
(
"""text_embeddings.position_embeddings.weight""",
"""vilt.embeddings.text_embeddings.position_embeddings.weight""",
),
("""text_embeddings.position_ids""", """vilt.embeddings.text_embeddings.position_ids"""),
(
"""text_embeddings.token_type_embeddings.weight""",
"""vilt.embeddings.text_embeddings.token_type_embeddings.weight""",
),
("""text_embeddings.LayerNorm.weight""", """vilt.embeddings.text_embeddings.LayerNorm.weight"""),
("""text_embeddings.LayerNorm.bias""", """vilt.embeddings.text_embeddings.LayerNorm.bias"""),
# patch embeddings
("""transformer.cls_token""", """vilt.embeddings.cls_token"""),
("""transformer.patch_embed.proj.weight""", """vilt.embeddings.patch_embeddings.projection.weight"""),
("""transformer.patch_embed.proj.bias""", """vilt.embeddings.patch_embeddings.projection.bias"""),
("""transformer.pos_embed""", """vilt.embeddings.position_embeddings"""),
# token type embeddings
("""token_type_embeddings.weight""", """vilt.embeddings.token_type_embeddings.weight"""),
] )
# final layernorm + pooler
rename_keys.extend(
[
("""transformer.norm.weight""", """vilt.layernorm.weight"""),
("""transformer.norm.bias""", """vilt.layernorm.bias"""),
("""pooler.dense.weight""", """vilt.pooler.dense.weight"""),
("""pooler.dense.bias""", """vilt.pooler.dense.bias"""),
] )
# classifier head(s)
if vqa_model:
# classification head
rename_keys.extend(
[
("""vqa_classifier.0.weight""", """classifier.0.weight"""),
("""vqa_classifier.0.bias""", """classifier.0.bias"""),
("""vqa_classifier.1.weight""", """classifier.1.weight"""),
("""vqa_classifier.1.bias""", """classifier.1.bias"""),
("""vqa_classifier.3.weight""", """classifier.3.weight"""),
("""vqa_classifier.3.bias""", """classifier.3.bias"""),
] )
elif nlvr_model:
# classification head
rename_keys.extend(
[
("""nlvr2_classifier.0.weight""", """classifier.0.weight"""),
("""nlvr2_classifier.0.bias""", """classifier.0.bias"""),
("""nlvr2_classifier.1.weight""", """classifier.1.weight"""),
("""nlvr2_classifier.1.bias""", """classifier.1.bias"""),
("""nlvr2_classifier.3.weight""", """classifier.3.weight"""),
("""nlvr2_classifier.3.bias""", """classifier.3.bias"""),
] )
else:
pass
return rename_keys
def _UpperCAmelCase (UpperCamelCase_ : str , UpperCamelCase_ : str ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
_lowerCAmelCase : Tuple = """vilt."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
_lowerCAmelCase : List[str] = state_dict.pop(F"transformer.blocks.{i}.attn.qkv.weight" )
_lowerCAmelCase : Optional[int] = state_dict.pop(F"transformer.blocks.{i}.attn.qkv.bias" )
# next, add query, keys and values (in that order) to the state dict
_lowerCAmelCase : List[str] = in_proj_weight[
: config.hidden_size, :
]
_lowerCAmelCase : Optional[Any] = in_proj_bias[: config.hidden_size]
_lowerCAmelCase : Dict = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
_lowerCAmelCase : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
_lowerCAmelCase : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
_lowerCAmelCase : Any = in_proj_bias[-config.hidden_size :]
def _UpperCAmelCase (UpperCamelCase_ : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def _UpperCAmelCase (UpperCamelCase_ : Any , UpperCamelCase_ : List[Any] , UpperCamelCase_ : Tuple ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = dct.pop(UpperCamelCase__ )
_lowerCAmelCase : List[str] = val
@torch.no_grad()
def _UpperCAmelCase (UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Union[str, Any] ):
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=UpperCamelCase__ )
_lowerCAmelCase : str = False
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Optional[int] = False
_lowerCAmelCase : Any = False
if "vqa" in checkpoint_url:
_lowerCAmelCase : int = True
_lowerCAmelCase : Union[str, Any] = 3129
_lowerCAmelCase : List[str] = """huggingface/label-files"""
_lowerCAmelCase : Any = """vqa2-id2label.json"""
_lowerCAmelCase : Dict = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase : Union[str, Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
_lowerCAmelCase : Optional[Any] = idalabel
_lowerCAmelCase : Dict = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : List[str] = ViltForQuestionAnswering(UpperCamelCase__ )
elif "nlvr" in checkpoint_url:
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Optional[int] = 2
_lowerCAmelCase : int = {0: """False""", 1: """True"""}
_lowerCAmelCase : Any = {v: k for k, v in config.idalabel.items()}
_lowerCAmelCase : List[str] = 3
_lowerCAmelCase : List[Any] = ViltForImagesAndTextClassification(UpperCamelCase__ )
elif "irtr" in checkpoint_url:
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Tuple = ViltForImageAndTextRetrieval(UpperCamelCase__ )
elif "mlm_itm" in checkpoint_url:
_lowerCAmelCase : Optional[int] = True
_lowerCAmelCase : List[Any] = ViltForMaskedLM(UpperCamelCase__ )
else:
raise ValueError("""Unknown model type""" )
# load state_dict of original model, remove and rename some keys
_lowerCAmelCase : Tuple = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location="""cpu""" )["""state_dict"""]
_lowerCAmelCase : Any = create_rename_keys(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for src, dest in rename_keys:
rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ )
if mlm_model or irtr_model:
_lowerCAmelCase : Optional[Any] = ["""itm_score.fc.weight""", """itm_score.fc.bias"""]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
# load state dict into HuggingFace model
model.eval()
if mlm_model:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
assert missing_keys == ["mlm_score.decoder.bias"]
else:
model.load_state_dict(UpperCamelCase__ )
# Define processor
_lowerCAmelCase : Optional[int] = ViltImageProcessor(size=384 )
_lowerCAmelCase : Optional[Any] = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_lowerCAmelCase : str = ViltProcessor(UpperCamelCase__ , UpperCamelCase__ )
# Forward pass on example inputs (image + text)
if nlvr_model:
_lowerCAmelCase : Any = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=UpperCamelCase__ ).raw )
_lowerCAmelCase : List[Any] = Image.open(requests.get("""https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg""" , stream=UpperCamelCase__ ).raw )
_lowerCAmelCase : Optional[int] = (
"""The left image contains twice the number of dogs as the right image, and at least two dogs in total are"""
""" standing."""
)
_lowerCAmelCase : Any = processor(UpperCamelCase__ , UpperCamelCase__ , return_tensors="""pt""" )
_lowerCAmelCase : Optional[int] = processor(UpperCamelCase__ , UpperCamelCase__ , return_tensors="""pt""" )
_lowerCAmelCase : int = model(
input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , )
else:
_lowerCAmelCase : Dict = Image.open(requests.get("""http://images.cocodataset.org/val2017/000000039769.jpg""" , stream=UpperCamelCase__ ).raw )
if mlm_model:
_lowerCAmelCase : List[str] = """a bunch of [MASK] laying on a [MASK]."""
else:
_lowerCAmelCase : Union[str, Any] = """How many cats are there?"""
_lowerCAmelCase : int = processor(UpperCamelCase__ , UpperCamelCase__ , return_tensors="""pt""" )
_lowerCAmelCase : List[Any] = model(**UpperCamelCase__ )
# Verify outputs
if mlm_model:
_lowerCAmelCase : str = torch.Size([1, 11, 30522] )
_lowerCAmelCase : Any = torch.tensor([-12.5_061, -12.5_123, -12.5_174] )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase__ , atol=1E-4 )
# verify masked token prediction equals "cats"
_lowerCAmelCase : List[Any] = outputs.logits[0, 4, :].argmax(-1 ).item()
assert tokenizer.decode([predicted_id] ) == "cats"
elif vqa_model:
_lowerCAmelCase : Tuple = torch.Size([1, 3129] )
_lowerCAmelCase : Dict = torch.tensor([-15.9_495, -18.1_472, -10.3_041] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
assert torch.allclose(outputs.logits[0, 0, :3] , UpperCamelCase__ , atol=1E-4 )
# verify vqa prediction equals "2"
_lowerCAmelCase : Optional[int] = outputs.logits.argmax(-1 ).item()
assert model.config.idalabel[predicted_idx] == "2"
elif nlvr_model:
_lowerCAmelCase : Union[str, Any] = torch.Size([1, 2] )
_lowerCAmelCase : str = torch.tensor([-2.8_721, 2.1_291] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
assert outputs.logits.shape == expected_shape
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
print(F"Saving model and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
_lowerCamelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--checkpoint_url",
default="https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
type=str,
help="URL of the checkpoint you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
_lowerCamelCase : Any = parser.parse_args()
convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
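# Example invocation (a sketch; the script file name and output folder are assumptions,
# while the checkpoint URL is the script's own default):
#
#   python convert_vilt_original_to_pytorch.py \
#       --checkpoint_url https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt \
#       --pytorch_dump_folder_path ./vilt-b32-mlm-itm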
| 429
|
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
TINY_BART = """sshleifer/bart-tiny-random"""
TINY_T5 = """patrickvonplaten/t5-tiny-random"""
@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
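# Usage sketch outside unittest (an assumption, not in the original file): distill a tiny
# BART teacher into a 1-encoder-layer / 1-decoder-layer student; the helper also returns
# bookkeeping about which teacher layers were copied.
#
#   student, *copied_layer_info = create_student_by_copying_alternating_layers(
#       TINY_BART, tempfile.mkdtemp(), e=1, d=1
#   )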
| 362
| 0
|
__all__ = [
    '''VerificationMode''',
    '''Version''',
    '''disable_progress_bar''',
    '''enable_progress_bar''',
    '''is_progress_bar_enabled''',
    '''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 712
|
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes over the numbers below `limit`, scanning odd candidates only."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes
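
# Example: prime_sieve(10) returns [2, 3, 5, 7] (2 is added up front, then only
# odd numbers are scanned).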
def solution(ceiling: int = 1_000_000) -> int:
    """Return the prime below `ceiling` expressible as the sum of the most consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f'{solution() = }')
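    # If this matches Project Euler problem 50, the printed value should be 997651
    # (please verify when running). Note that `sol in primes` is a linear scan over
    # a list; converting `primes` to a set would speed this up without changing
    # the result.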
| 298
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=self.text_embedder_hidden_size, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=224, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=14, )
        return CLIPVisionModelWithProjection(config)

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor
        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000, clip_sample=True, clip_sample_range=10.0, )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference, test_mean_pixel_difference=test_mean_pixel_difference, )
| 90
|
from __future__ import annotations
def depth_first_search(possible_board: list[int], diagonal_right_collisions: list[int], diagonal_left_collisions: list[int], boards: list[list[str]], n: int, ) -> None:
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
        # If any of these are True it means there is a collision so we continue to the
# next value in the for loop.
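        #
        # Example: queens at (row=1, col=3) and (row=2, col=2) collide on a 135º
        # diagonal because 1 + 3 == 2 + 2, even though their rows, columns and
        # 45º diagonals (row - col) all differ.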
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
# Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 651
| 0
|
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)
__UpperCAmelCase ={"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets, **kwargs, )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)

    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs." )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
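
# Illustrative example of the `_pad` behaviour above: if `input_ids` is padded
# from length 5 to length 7, a `global_attention_mask` of [1, 0, 0, 0, 0] becomes
# [1, 0, 0, 0, 0, -1, -1] with padding_side="right", since -1 marks padded
# positions rather than "global" (1) or "local" (0) attention.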
| 708
|
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
logger = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i


class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}


class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call


class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
| 261
| 0
|
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )


def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id"""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
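
# For instance, with pad_token_id=0 and
#   input_ids = torch.tensor([[5, 6, 0, 0],
#                             [7, 0, 0, 0]])
# trim_batch keeps only the first two columns, since the last two contain
# nothing but padding in every row.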
class Seq2SeqDataset(Dataset):
    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix="", ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json"""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))


def pickle_save(obj, path):
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
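
# Worked example: f1_score("the cat sat", "a cat sat down") == 0.8, since
# normalization drops the articles, leaving 2 shared tokens out of 2 predicted
# and 3 reference tokens: F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.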
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
| 120
|
'''simple docstring'''
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                loga = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(loga)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
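            # The l0 branch above implements the stretched "hard concrete" gate used
            # for L0-regularized movement pruning: sigmoid scores in (0, 1) are
            # rescaled to the interval (l, r) = (-0.1, 1.1) and clamped back to
            # [0, 1], so scores near either extreme saturate to exactly 0 or 1.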
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pruning_method",
choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
type=str,
required=True,
help=(
"Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
" sigmoied_threshold = Soft movement pruning)"
),
)
parser.add_argument(
"--threshold",
type=float,
required=False,
help=(
"For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
"For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
"Not needed for `l0`"
),
)
parser.add_argument(
"--model_name_or_path",
type=str,
required=True,
help="Folder containing the model that was previously fine-pruned",
)
parser.add_argument(
"--target_model_path",
default=None,
type=str,
required=False,
help="Folder containing the model that was previously fine-pruned",
)
    args = parser.parse_args()
main(args)
| 120
| 1
|
'''simple docstring'''
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation."""
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
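
# For example:
#   bin_to_octal("1111")   -> "17"  (0b1111 == 15 == 0o17)
#   bin_to_octal("101010") -> "52"  (0b101010 == 42 == 0o52)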
if __name__ == "__main__":
from doctest import testmod
testmod()
| 718
|
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION


class DPRQuestionEncoderTokenizer(BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
'''DPRSpanPrediction''', ['''span_score''', '''relevance_score''', '''doc_id''', '''start_index''', '''end_index''', '''text''']
)
DPRReaderOutput = collections.namedtuple('''DPRReaderOutput''', ['''start_logits''', '''end_logits''', '''relevance_logits'''])
CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
        - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
          if provided).
        - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided.
        - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
          lengths).
    truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
        Activates and controls truncation. Accepts the following values:
        - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
          the maximum acceptable input length for the model if that argument is not provided. This will truncate
          token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
          of pairs) is provided.
        - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the first
          sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
          acceptable input length for the model if that argument is not provided. This will only truncate the
          second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
        - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
          greater than the model maximum admissible input size).
    max_length (`int`, *optional*):
        Controls the maximum length to use by one of the truncation/padding parameters.
        If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
        is required by one of the truncation/padding parameters. If the model has no specific maximum input
        length (like XLNet) truncation/padding to a maximum length will be deactivated.
    return_tensors (`str` or [`~utils.TensorType`], *optional*):
        If set, will return tensors instead of list of python integers. Acceptable values are:
        - `'tf'`: Return TensorFlow `tf.constant` objects.
        - `'pt'`: Return PyTorch `torch.Tensor` objects.
        - `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(self, questions, titles: Optional[str] = None, texts: Optional[str] = None, padding: Union[bool, str] = False, truncation: Union[bool, str] = False, max_length: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_attention_mask: Optional[bool] = None, **kwargs, ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions, text_pair, padding=padding, truncation=truncation, max_length=max_length, return_tensors=return_tensors, return_attention_mask=return_attention_mask, **kwargs, )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f'There should be as many titles as texts, but got {len(titles)} titles and {len(texts)} texts.' )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)['input_ids']
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)['input_ids']
        encoded_inputs = {
            'input_ids': [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs['attention_mask'] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(self, reader_input: BatchEncoding, reader_output: DPRReaderOutput, num_spans: int = 16, max_answer_length: int = 64, num_spans_per_passage: int = 4, ) -> List[DPRSpanPrediction]:
        input_ids = reader_input['input_ids']
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len], end_logits=end_logits[doc_id][passage_offset:sequence_len], max_answer_length=max_answer_length, top_spans=num_spans_per_passage, )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index], relevance_score=relevance_logits[doc_id], doc_id=doc_id, start_index=start_index, end_index=end_index, text=self.decode(sequence_ids[start_index : end_index + 1]), ) )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(self, start_logits: List[int], end_logits: List[int], max_answer_length: int, top_spans: int, ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'Span is too long: {length} > {max_answer_length}')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
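
# Rough usage sketch (checkpoint name taken from the maps above; the reader
# model call itself is elided and shown only as a comment):
#
#   tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoded_inputs = tokenizer(
#       questions=["What is love?"],
#       titles=["Haddaway"],
#       texts=["'What Is Love' is a song recorded by the artist Haddaway"],
#       return_tensors="pt",
#   )
#   # outputs = model(**encoded_inputs)
#   # predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)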
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["""input_ids""", """attention_mask"""]
| 410
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, dropout_rate: float = 0.5, drop_connect_rate: float = 0.2, **kwargs, ) -> None:
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
| 299
|
"""simple docstring"""
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent


class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent))
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)

            self.bwd_bfs.target = current_fwd_node
            self.fwd_bfs.target = current_bwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 299
| 1
|
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class JukeboxTokenizationTest(unittest.TestCase):
    tokenizer_class = JukeboxTokenizer
    metas = {
"artist": "Zac Brown Band",
"genres": "Country",
"lyrics": "I met a traveller from an antique land,\n Who said \"Two vast and trunkless legs of stone\n Stand in the desert. . . . Near them, on the sand,\n Half sunk a shattered visage lies, whose frown,\n And wrinkled lip, and sneer of cold command,\n Tell that its sculptor well those passions read\n Which yet survive, stamped on these lifeless things,\n The hand that mocked them, and the heart that fed;\n And on the pedestal, these words appear:\n My name is Ozymandias, King of Kings;\n Look on my Works, ye Mighty, and despair!\n Nothing beside remains. Round the decay\n Of that colossal Wreck, boundless and bare\n The lone and level sands stretch far away\n ",
}
@require_torch
    def test_1b_lyrics_tokenizer(self) -> None:
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("""openai/jukebox-1b-lyrics""")
        tokens = tokenizer(**self.metas)["""input_ids"""]
# fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
torch.tensor([[0, 0, 0, 1_069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
    @require_torch
    def test_5b_lyrics_tokenizer(self):
        import torch

        tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics")
        tokens = tokenizer(**self.metas)["input_ids"]
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
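
# Illustrative usage sketch (not part of the test class; the checkpoint id matches
# the ones exercised above): the tokenizer turns artist/genres/lyrics metadata
# into one tensor of token ids per Jukebox prior.
#   tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#   input_ids = tokenizer(**JukeboxTokenizationTest.metas)["input_ids"]
#   print([t.shape for t in input_ids])  # three tensors, one per prior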
| 171
|
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Visual-Attention-Network/van-base": (
        "https://huggingface.co/Visual-Attention-Network/van-base/resolve/main/config.json"
    ),
}
class VanConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a VAN (Visual Attention Network) model.
    """

    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
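
# Illustrative usage sketch (assumes only the standard PretrainedConfig API):
#   config = VanConfig(hidden_sizes=[64, 128, 320, 512], depths=[3, 3, 12, 3])
#   assert config.model_type == "van"
#   config.save_pretrained("./van-config")  # writes config.json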
| 171
| 1
|
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    @require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
    @slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)

    @slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
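
# Illustrative usage sketch (mirrors the slow tests above; return_tensors is the
# standard tokenizer kwarg and is an assumption here, not something this test uses):
#   tok = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tok(["who got the first nobel prize in physics"], return_tensors="pt")
#   print(batch["input_ids"].shape)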
| 407
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_altclip": [
"ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AltCLIPConfig",
"AltCLIPTextConfig",
"AltCLIPVisionConfig",
],
"processing_altclip": ["AltCLIPProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_altclip"] = [
"ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"AltCLIPPreTrainedModel",
"AltCLIPModel",
"AltCLIPTextModel",
"AltCLIPVisionModel",
]
if TYPE_CHECKING:
from .configuration_altclip import (
ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
AltCLIPConfig,
AltCLIPTextConfig,
AltCLIPVisionConfig,
)
from .processing_altclip import AltCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_altclip import (
ALTCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
AltCLIPModel,
AltCLIPPreTrainedModel,
AltCLIPTextModel,
AltCLIPVisionModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
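# Note: this is the standard transformers lazy-import pattern. At runtime the
# module object is replaced by a _LazyModule, so importing e.g. AltCLIPModel from
# this package only pulls in the torch-backed modeling file when (and if) that
# attribute is actually accessed.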
| 611
| 0
|
def is_power_of_two(number: int) -> bool:
    """Return True if ``number`` is zero or a power of two (i.e. has at most one set bit)."""
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
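
# Illustrative examples of the bit trick (a power of two has exactly one set bit,
# so n & (n - 1) clears it to zero):
#   is_power_of_two(8)  -> True   (0b1000 & 0b0111 == 0)
#   is_power_of_two(6)  -> False  (0b0110 & 0b0101 == 0b0100)
#   is_power_of_two(0)  -> True   (by convention of this implementation)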
if __name__ == "__main__":
import doctest
doctest.testmod()
| 218
|
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    # dtype pair reconstructed from the upstream example (int32 inputs, int64 labels).
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: Optional[str] = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
        results.update(result)
return results
if __name__ == "__main__":
main()
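
# Illustrative invocation (flag names come from the dataclasses above; file paths
# and the model id are placeholders):
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-uncased \
#     --train_file train.csv --dev_file dev.csv --test_file test.csv \
#     --label_column_id 0 --max_seq_length 128 \
#     --output_dir ./model --do_train --do_eval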
| 218
| 1
|
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--base_model''', action='''store_true''', help='''Whether you want just the base model (no decoder) or not.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
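
# Illustrative invocation (the script name and all paths are placeholders):
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path model.ckpt --config_file config.json \
#     --pytorch_dump_path pytorch_model.bin --base_model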
| 280
|
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    """Adaptive softmax with an optional input projection, as used by Transformer-XL."""

    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        """
        hidden :: [len * bsz, d_proj], labels :: [len * bsz]
        Returns per-token negative log-likelihoods when ``labels`` is given,
        otherwise log-probabilities over the full vocabulary.
        """
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        """Computes log-probabilities for all tokens given hidden states."""
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    # The destination slice was lost in the source; writing the tail
                    # cluster's range is the natural reconstruction.
                    out[:, start_idx:stop_idx] = logprob_i

            return out
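
# Illustrative usage sketch (all sizes are made-up assumptions): with cutoffs
# [2000, 6000] the vocabulary splits into a 2000-word head and two tail clusters;
# `forward` returns per-token negative log-likelihoods.
#   crit = ProjectedAdaptiveLogSoftmax(n_token=10000, d_embed=512, d_proj=512, cutoffs=[2000, 6000])
#   hidden = torch.randn(4, 16, 512)           # [bsz, seq_len, d_proj]
#   labels = torch.randint(0, 10000, (4, 16))  # [bsz, seq_len]
#   nll = crit(hidden, labels)                 # shape: [bsz * (seq_len - 1)]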
| 280
| 1
|
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        min_depth=8,
        tf_padding=True,
        last_hidden_size=1024,
        output_stride=32,
        hidden_act="relu6",
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.tf_padding = tf_padding
        self.last_hidden_size = int(last_hidden_size * depth_multiplier)
        self.output_stride = output_stride
        self.hidden_act = hidden_act
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            min_depth=self.min_depth,
            tf_padding=self.tf_padding,
            hidden_act=self.hidden_act,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MobileNetVaModel, "image-classification": MobileNetVaForImageClassification}
        if is_torch_available()
        else {}
    )

    # Flag names follow the common-test mixins; the last two are reconstructed
    # from the four boolean class attributes in the source.
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileNetV1 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV1 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 26
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-4.1739, -1.1233, 3.1205]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
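
# Illustrative inference sketch (same checkpoint and class names as the
# integration test above):
#   processor = MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
#   model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   print(model.config.id2label[logits.argmax(-1).item()])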
| 15
|
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place with Dijkstra's three-way partitioning."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)
    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by commas:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(f"{dutch_national_flag_sort(unsorted)}")
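
# Worked example of the three-pointer invariant:
#   dutch_national_flag_sort([2, 0, 1, 0, 2]) -> [0, 0, 1, 2, 2]
# `low` fences off placed 0s on the left, `high` fences off placed 2s on the
# right, and `mid` scans the unclassified middle, so the sort runs in O(n) time
# and O(1) extra space in a single pass.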
| 15
| 1
|
"""simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one of the binary inputs is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 273
|
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")

_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")

_split_re = r"^\w+(\.\w+)*$"

INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def camelcase_to_snakecase(name):
    """Convert camel-case string to snake-case."""
    name = _uppercase_uppercase_re.sub(r"\1_\2", name)
    name = _lowercase_uppercase_re.sub(r"\1_\2", name)
    return name.lower()


def snakecase_to_camelcase(name):
    """Convert snake-case string to camel-case string."""
    name = _single_underscore_re.split(name)
    name = [_multiple_underscores_re.split(n) for n in name]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(name) if n != "")


def filename_prefix_for_name(name):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    return camelcase_to_snakecase(name)


def filename_prefix_for_split(name, split):
    if os.path.basename(name) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}")
    if not re.match(_split_re, split):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'.")
    return f"{filename_prefix_for_name(name)}-{split}"


def filepattern_for_dataset_split(dataset_name, split, data_dir, filetype_suffix=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(data_dir, prefix)
    return f"{filepath}*"


def filenames_for_dataset_split(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)

    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
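
# Illustrative outputs (derived directly from the helpers above):
#   camelcase_to_snakecase("MyDataset")             -> "my_dataset"
#   filename_prefix_for_split("MyDataset", "train") -> "my_dataset-train"
#   filenames_for_dataset_split("/data", "MyDataset", "train", "arrow", shard_lengths=[100, 100])
#     -> ["/data/my_dataset-train-00000-of-00002.arrow",
#         "/data/my_dataset-train-00001-of-00002.arrow"]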
| 186
| 0
|
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            # Setting height and width to None to prevent OOMs on CPU.
            "height": None,
            "width": None,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)

    def test_stable_diffusion_panorama_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_views_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_panorama_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
        )
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "a photo of the dolomites",
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion_panorama_default(self):
        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-2

    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-base", safety_checker=None
        )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                [
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                    0.0,
                ]
            ]
        )

        assert np.abs(expected_slice - image_slice).max() < 1e-3
def UpperCamelCase ( self : Optional[int] ) -> Any:
UpperCAmelCase_ = 0
def callback_fn(lowerCAmelCase_ : int , lowerCAmelCase_ : int , lowerCAmelCase_ : torch.FloatTensor ) -> None:
UpperCAmelCase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
UpperCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
UpperCAmelCase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 2_56)
UpperCAmelCase_ = latents[0, -3:, -3:, -1]
UpperCAmelCase_ = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
UpperCAmelCase_ = False
UpperCAmelCase_ = '''stabilityai/stable-diffusion-2-base'''
UpperCAmelCase_ = DDIMScheduler.from_pretrained(UpperCAmelCase__ , subfolder='''scheduler''' )
UpperCAmelCase_ = StableDiffusionPanoramaPipeline.from_pretrained(UpperCAmelCase__ , scheduler=UpperCAmelCase__ , safety_checker=UpperCAmelCase__ )
UpperCAmelCase_ = pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
pipe.enable_attention_slicing()
UpperCAmelCase_ = self.get_inputs()
pipe(**UpperCAmelCase__ , callback=UpperCAmelCase__ , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = "stabilityai/stable-diffusion-2-base"
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
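    # The same pipeline outside the test harness, as a minimal sketch: the model id and
    # scheduler mirror the tests above; the device, prompt, and step count below are
    # illustrative assumptions, not taken from the test suite.
    #
    #     import torch
    #     from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline
    #
    #     model_ckpt = "stabilityai/stable-diffusion-2-base"
    #     scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
    #     pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler)
    #     pipe = pipe.to("cuda")
    #     image = pipe("a photo of the dolomites", num_inference_steps=50).images[0]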
| 700
|
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type: str,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_type',
choices=['rag_sequence', 'rag_token'],
required=True,
type=str,
help='RAG model type: rag_sequence, rag_token',
)
parser.add_argument('--dest', type=str, required=True, help='Path to the output checkpoint directory.')
parser.add_argument('--generator_name_or_path', type=str, required=True, help='Generator model identifier')
parser.add_argument(
'--question_encoder_name_or_path', type=str, required=True, help='Question encoder model identifier'
)
parser.add_argument(
'--generator_tokenizer_name_or_path',
type=str,
help='Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``',
)
parser.add_argument(
'--question_encoder_tokenizer_name_or_path',
type=str,
help='Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``',
)
parser.add_argument(
'--config_name_or_path',
type=str,
help=(
'Identifier of the model config to use, if not provided, resolves to a base config for a given'
' ``model_type``'
),
)
    args = parser.parse_args()
    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
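    # Example invocation, as a sketch; the model identifiers and output path below are
    # illustrative assumptions, not taken from the original script:
    #
    #     python consolidate_rag_checkpoint.py \
    #         --model_type rag_sequence \
    #         --generator_name_or_path facebook/bart-large \
    #         --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
    #         --dest ./rag-checkpoint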
| 407
| 0
|
'''simple docstring'''
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
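    # Note on the custom-timestep tests above: set_timesteps(timesteps=[100, 87, 50, 1, 0])
    # walks the reverse process over an arbitrary descending schedule, and previous_timestep(t)
    # is expected to return the next entry in that schedule (or -1 after the final step).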
| 107
|
import copy
import random
from transformers import CLIPTokenizer
class MultiTokenCLIPTokenizer(CLIPTokenizer):
    """CLIP tokenizer that expands one placeholder token into several learned tokens."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}; keep placeholder tokens independent."
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(
                    self.replace_placeholder_tokens_in_text(
                        text[i], vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
                    )
                )
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
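# Usage sketch (the checkpoint and placeholder token below are hypothetical):
#
#     tokenizer = MultiTokenCLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
#     tokenizer.add_placeholder_tokens("<cat-toy>", num_vec_per_token=4)
#     encoded = tokenizer("a photo of <cat-toy>", vector_shuffle=True)
#
# With num_vec_per_token=4, "<cat-toy>" is registered as "<cat-toy>_0" ... "<cat-toy>_3"
# and expanded in the text before the underlying CLIPTokenizer runs.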
| 114
| 0
|
from ..utils import DummyObject, requires_backends


# The original module defines one dummy class per torch-backed object, each an exact copy
# of the template below; the obfuscated dump collapsed every class name to the same
# identifier (making the definitions verbatim duplicates that shadow one another), so the
# pattern is shown once with a placeholder name.
class DummyTorchObject(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


# The module-level dummy functions repeat the same pattern, one per torch-backed function.
def dummy_torch_function(*args, **kwargs):
    requires_backends(dummy_torch_function, ["torch"])
| 718
|
import random


def partition(a, left_index, right_index):
    pivot = a[left_index]
    i = left_index + 1
    for j in range(left_index + 1, right_index):
        if a[j] < pivot:
            a[j], a[i] = a[i], a[j]
            i += 1
    a[left_index], a[i - 1] = a[i - 1], a[left_index]
    return i - 1


def quick_sort_random(a, left, right):
    if left < right:
        pivot = random.randint(left, right - 1)
        a[pivot], a[left] = (
            a[left],
            a[pivot],
        )  # switches the pivot with the left most bound
        pivot_index = partition(a, left, right)
        quick_sort_random(
            a, left, pivot_index
        )  # recursive quicksort to the left of the pivot point
        quick_sort_random(
            a, pivot_index + 1, right
        )  # recursive quicksort to the right of the pivot point


def main():
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    quick_sort_random(arr, 0, len(arr))
    print(arr)


if __name__ == "__main__":
    main()
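# Example: for a = [4, 1, 3, 2], quick_sort_random(a, 0, len(a)) sorts in place to [1, 2, 3, 4].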
| 472
| 0
|
'''simple docstring'''
import numpy as np
import torch
import tqdm
from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(self, value_function, unet, scheduler, env):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]

                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                # zero out the guidance gradient for the last couple of denoising steps
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]

            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)

        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)

        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)

        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)

        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")

        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)

        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
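# The guidance step inside run_diffusion, written out: x <- x + scale * sigma_t * d(y)/dx,
# where y is the value model's prediction for the trajectory and
# sigma_t = exp(0.5 * Var[x_{t-1} | x_t]); the gradient is zeroed for timesteps < 2 so the
# final denoising steps stay unperturbed.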
| 8
|
import unittest
from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
from transformers.models.big_bird.modeling_flax_big_bird import (
FlaxBigBirdForCausalLM,
FlaxBigBirdForMaskedLM,
FlaxBigBirdForMultipleChoice,
FlaxBigBirdForPreTraining,
FlaxBigBirdForQuestionAnswering,
FlaxBigBirdForSequenceClassification,
FlaxBigBirdForTokenClassification,
FlaxBigBirdModel,
)
class FlaxBigBirdModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = BigBirdConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, attention_type=self.attention_type, block_size=self.block_size, num_random_blocks=self.num_random_blocks, use_bias=self.use_bias, rescale_embeddings=self.rescale_embeddings,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict
@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # the flax block-sparse attention has a slightly different computational graph than
        # the pytorch version, so attention outputs are not compared element-wise
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
| 547
| 0
|
'''simple docstring'''
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # compute the affine transform defined by the two point triples and apply it
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    # (point pairings below follow the usual version of this example; the obfuscated dump
    # collapsed all four array names, so pts1..pts4 are reconstructed)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
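# cv2.getAffineTransform solves for the 2x3 matrix M that maps the three source points onto
# the three destination points, i.e. [x', y']^T = M @ [x, y, 1]^T; cv2.warpAffine then applies
# M to every pixel, which is why three point pairs are enough to define each "rotation".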
| 432
|
'''simple docstring'''
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and
# uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 432
| 1
|
'''simple docstring'''
class lowercase_ (snake_case_ ):
"""simple docstring"""
pass
class lowercase_ (snake_case_ ):
"""simple docstring"""
pass
class FixedPriorityQueue:
    """Queue with three fixed priorities; 0 is the highest."""

    def __init__(self):
        self.queues = [
            [],
            [],
            [],
        ]

    def enqueue(self, priority: int, data: int) -> None:
        try:
            if len(self.queues[priority]) >= 100:
                raise OverFlowError("Maximum queue size is 100")
            self.queues[priority].append(data)
        except IndexError:
            raise ValueError("Valid priorities are 0, 1, and 2")

    def dequeue(self) -> int:
        for queue in self.queues:
            if queue:
                return queue.pop(0)
        raise UnderFlowError("All queues are empty")

    def __str__(self):
        return "\n".join(f"Priority {i}: {q}" for i, q in enumerate(self.queues))


class ElementPriorityQueue:
    """Queue where the smallest element is dequeued first."""

    def __init__(self):
        self.queue = []

    def enqueue(self, data: int) -> None:
        if len(self.queue) == 100:
            raise OverFlowError("Maximum queue size is 100")
        self.queue.append(data)

    def dequeue(self) -> int:
        if not self.queue:
            raise UnderFlowError("The queue is empty")
        else:
            data = min(self.queue)
            self.queue.remove(data)
            return data

    def __str__(self):
        return str(self.queue)
def fixed_priority_queue():
    """simple docstring"""
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq)
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue():
    """simple docstring"""
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq)
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
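# Added note: ElementPriorityQueue.dequeue is O(n) per call because it scans the
# list with min() and then remove()s the hit. A heap-backed sketch with the same
# dequeue order (assuming only that order matters) would be O(log n):
#
#   import heapq
#   heap = []
#   heapq.heappush(heap, 10)
#   heapq.heappush(heap, 1)
#   heapq.heappop(heap)  # -> 1, the smallest element comes out first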
| 41
|
'''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class HfDeepSpeedConfig:
    '''simple docstring'''

    def __init__(self, config_file_or_dict):
        if isinstance(config_file_or_dict, dict):
            # Don't modify user's data should they want to reuse it (e.g. in tests), because once we
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
            config = deepcopy(config_file_or_dict)
        elif os.path.exists(config_file_or_dict):
            with io.open(config_file_or_dict, "r", encoding="utf-8") as f:
                config = json.load(f)
        else:
            try:
                config_decoded = base64.urlsafe_b64decode(config_file_or_dict).decode("utf-8")
                config = json.loads(config_decoded)
            except (UnicodeDecodeError, AttributeError, ValueError):
                raise ValueError(
                    f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}"""
                )

        self.config = config
        self.set_stage_and_offload()
    def set_stage_and_offload(self):
        # zero stage - this is done as early as possible, before model is created, to allow
        # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object
        # during ``zero.Init()`` which needs to know the dtype, and some other hparams.
        self._stage = self.get_value("zero_optimization.stage", -1)

        # offload
        self._offload = False
        if self.is_zero2() or self.is_zero3():
            offload_devices_valid = set(["cpu", "nvme"])
            offload_devices = set(
                [
                    self.get_value("zero_optimization.offload_optimizer.device"),
                    self.get_value("zero_optimization.offload_param.device"),
                ]
            )
            if len(offload_devices & offload_devices_valid) > 0:
                self._offload = True
    def find_config_node(self, ds_key_long):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        ds_key = nodes.pop()
        for node in nodes:
            config = config.get(node)
            if config is None:
                return None, ds_key

        return config, ds_key
    def get_value(self, ds_key_long, default=None):
        config, ds_key = self.find_config_node(ds_key_long)
        if config is None:
            return default
        return config.get(ds_key, default)
    def del_config_sub_tree(self, ds_key_long, must_exist=False):
        config = self.config

        # find the config node of interest if it exists
        nodes = ds_key_long.split(".")
        for node in nodes:
            parent_config = config
            config = config.get(node)
            if config is None:
                if must_exist:
                    raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
                else:
                    return

        # if found remove it
        if parent_config is not None:
            parent_config.pop(node)
    def is_true(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else bool(value)

    def is_false(self, ds_key_long):
        value = self.get_value(ds_key_long)
        return False if value is None else not bool(value)

    def is_zero2(self):
        return self._stage == 2

    def is_zero3(self):
        return self._stage == 3

    def is_offload(self):
        return self._offload
class DeepSpeedEngineWrapper:
    '''simple docstring'''

    def __init__(self, engine):
        self.engine = engine

    def backward(self, loss, **kwargs):
        # runs backpropagation and handles mixed precision
        self.engine.backward(loss, **kwargs)

        # Deepspeed's `engine.step` performs the following operations:
        # - gradient accumulation check
        # - gradient clipping
        # - optimizer step
        # - zero grad
        # - checking overflow
        # - lr_scheduler step (only if engine.lr_scheduler is not None)
        self.engine.step()
        # and this plugin overrides the above calls with no-ops when Accelerate runs under
        # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
        # training loop that works transparently under many training regimes.


class DeepSpeedOptimizerWrapper(AcceleratedOptimizer):
    '''simple docstring'''

    def __init__(self, optimizer):
        super().__init__(optimizer, device_placement=False, scaler=None)
        self.__has_overflow__ = hasattr(self.optimizer, "overflow")

    def zero_grad(self, set_to_none=None):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed

    @property
    def step_was_skipped(self):
        if self.__has_overflow__:
            return self.optimizer.overflow
        return False


class DeepSpeedSchedulerWrapper(AcceleratedScheduler):
    '''simple docstring'''

    def __init__(self, scheduler, optimizers):
        super().__init__(scheduler, optimizers)

    def step(self):
        pass  # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed


class DummyOptim:
    '''simple docstring'''

    def __init__(self, params, lr=0.001, weight_decay=0, **kwargs):
        self.params = params
        self.lr = lr
        self.weight_decay = weight_decay
        self.kwargs = kwargs


class DummyScheduler:
    '''simple docstring'''

    def __init__(self, optimizer, total_num_steps=None, warmup_num_steps=0, **kwargs):
        self.optimizer = optimizer
        self.total_num_steps = total_num_steps
        self.warmup_num_steps = warmup_num_steps
        self.kwargs = kwargs
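# Added usage sketch (illustrative values, not from the original file):
#
#   cfg = HfDeepSpeedConfig({"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}})
#   cfg.get_value("zero_optimization.stage")                 # -> 3, dotted path into nested dicts
#   cfg.get_value("zero_optimization.offload_param.device")  # -> "cpu"
#   cfg.is_zero3()                                           # -> True
#   cfg.is_offload()                                         # -> True, "cpu" is a valid offload device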
| 436
| 0
|
'''simple docstring'''
def solution(min_total: int = 10**12) -> int:
    '''simple docstring'''
    prev_numerator = 1
    prev_denominator = 0

    numerator = 1
    denominator = 1

    while numerator <= 2 * min_total - 1:
        prev_numerator += 2 * numerator
        numerator += 2 * prev_numerator

        prev_denominator += 2 * denominator
        denominator += 2 * prev_denominator

    return (denominator + 1) // 2
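# Added note (assuming this solves Project Euler 100): P(two blue) = b/n * (b-1)/(n-1) = 1/2
# reduces to the Pell-like equation (2n-1)^2 - 2(2b-1)^2 = -1 for blue count b and total n.
# The loop walks its solutions with numerator = 2n - 1 and denominator = 2b - 1, stopping once
# the total n first exceeds min_total, and returns the blue-disc count b = (denominator + 1) // 2.
# Sanity check on the smallest solution: b = 15, n = 21 gives 41^2 - 2 * 29^2 = -1.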
if __name__ == "__main__":
print(F'''{solution() = }''')
| 665
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}


class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"
    def __init__(self, vocab_size=50432, hidden_size=6144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!"
            )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(f"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
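# Added example (illustrative, not from the original file): a value that passes
# `_rope_scaling_validation` carries exactly the two checked fields, with a
# float factor greater than 1:
#
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})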
| 665
| 1
|
"""simple docstring"""
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_orig_config_file(config_path):
    print('Loading config file...')

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(config_path, 'r') as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)
            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error('Error while loading config file: {}. Error message: {}'.format(config_path, str(exc)))
    return config
def get_mobilevitva_config(task_name, orig_cfg_file):
    config = MobileViTVaConfig()
    is_segmentation_model = False

    # dataset
    if task_name.startswith('imagenet1k_'):
        config.num_labels = 1000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-1k-id2label.json'
    elif task_name.startswith('imagenet21k_to_1k_'):
        config.num_labels = 21000
        if int(task_name.strip().split('_')[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = 'imagenet-22k-id2label.json'
    elif task_name.startswith('ade20k_'):
        config.num_labels = 151
        config.image_size = 512
        filename = 'ade20k-id2label.json'
        is_segmentation_model = True
    elif task_name.startswith('voc_'):
        config.num_labels = 21
        config.image_size = 512
        filename = 'pascal-voc-id2label.json'
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, 'model.classification.name', -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, 'model.classification.mitv2.width_multiplier', 1.0)
    assert (
        getattr(orig_config, 'model.classification.mitv2.attn_norm_layer', -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, 'model.classification.activation.name', 'swish')
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, 'model.segmentation.output_stride', 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_rates', [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_out_channels', 512)
            config.aspp_dropout_prob = getattr(orig_config, 'model.segmentation.deeplabv3.aspp_dropout', 0.1)

    # id2label
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace('.block.', '.')
        if ".conv." in k:
            k_new = k_new.replace('.conv.', '.convolution.')
        if ".norm." in k:
            k_new = k_new.replace('.norm.', '.normalization.')

        if "conv_1." in k:
            k_new = k_new.replace('conv_1.', f'''{model_prefix}conv_stem.''')
        for i in [1, 2]:
            if f'''layer_{i}.''' in k:
                k_new = k_new.replace(f'''layer_{i}.''', f'''{model_prefix}encoder.layer.{i-1}.layer.''')
        if ".exp_1x1." in k:
            k_new = k_new.replace('.exp_1x1.', '.expand_1x1.')
        if ".red_1x1." in k:
            k_new = k_new.replace('.red_1x1.', '.reduce_1x1.')

        for i in [3, 4, 5]:
            if f'''layer_{i}.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.0.''', f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''')
            if f'''layer_{i}.1.local_rep.0.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.0.''', f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''')
            if f'''layer_{i}.1.local_rep.1.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.local_rep.1.''', f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''')

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f'''layer_{i}.1.global_rep.{j}.''' in k:
                    k_new = k_new.replace(
                        f'''layer_{i}.1.global_rep.{j}.''', f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''')
            if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
                k_new = k_new.replace(
                    f'''layer_{i}.1.global_rep.{j+1}.''', f'''{model_prefix}encoder.layer.{i-1}.layernorm.''')

            if f'''layer_{i}.1.conv_proj.''' in k:
                k_new = k_new.replace(f'''layer_{i}.1.conv_proj.''', f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''')

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace('pre_norm_attn.0.', 'layernorm_before.')
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace('pre_norm_attn.1.', 'attention.')
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace('pre_norm_ffn.0.', 'layernorm_after.')
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace('pre_norm_ffn.1.', 'ffn.conv1.')
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace('pre_norm_ffn.3.', 'ffn.conv2.')
        if "classifier.1." in k:
            k_new = k_new.replace('classifier.1.', 'classifier.')

        if "seg_head." in k:
            k_new = k_new.replace('seg_head.', 'segmentation_head.')
        if ".aspp_layer." in k:
            k_new = k_new.replace('.aspp_layer.', '.')
        if ".aspp_pool." in k:
            k_new = k_new.replace('.aspp_pool.', '.')

        rename_keys.append((k, k_new))
    return rename_keys
def remove_unused_keys(state_dict):
    """remove unused keys (e.g.: seg_head.aux_head)"""
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith('seg_head.aux_head.'):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitva_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitva_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location='cpu')

    # load huggingface model
    if task_name.startswith('ade20k_') or task_name.startswith('voc_'):
        model = MobileViTVaForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTVaForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of load the original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors='pt')
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith('imagenet'):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print('Predicted class:', model.config.id2label[predicted_class_idx])
        if task_name.startswith('imagenet1k_256') and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6_3_3_6E0_0, -7.3_2_0_4E-0_2, -5.1_8_8_3E-0_1])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1E-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--task''',
default='''imagenet1k_256''',
type=str,
help=(
'''Name of the task for which the MobileViTV2 model you\'d like to convert is trained on . '''
'''
Classification (ImageNet-1k)
- MobileViTV2 (256x256) : imagenet1k_256
- MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
- MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
imagenet21k_to_1k_256
- MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
ImageNet-1k 384x384) : imagenet21k_to_1k_384
Segmentation
- ADE20K Dataset : ade20k_deeplabv3
- Pascal VOC 2012 Dataset: voc_deeplabv3
'''
),
choices=[
'''imagenet1k_256''',
'''imagenet1k_384''',
'''imagenet21k_to_1k_256''',
'''imagenet21k_to_1k_384''',
'''ade20k_deeplabv3''',
'''voc_deeplabv3''',
],
)
parser.add_argument(
'''--orig_checkpoint_path''', required=True, type=str, help='''Path to the original state dict (.pt file).'''
)
parser.add_argument('''--orig_config_path''', required=True, type=str, help='''Path to the original config file.''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
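# Added invocation sketch (the script name and all paths below are hypothetical):
#
#   python convert_mobilevitv2_to_pytorch.py --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-hf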
| 260
|
"""simple docstring"""
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []
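# Added usage sketch (assuming 1 marks walkable cells and 0 marks walls):
#
#   grid = np.array([[1, 1, 1],
#                    [0, 0, 1],
#                    [1, 1, 1]])
#   dist, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
#   # dist == 6.0 and path walks around the wall:
#   # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]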
if __name__ == "__main__":
import doctest
doctest.testmod()
| 260
| 1
|
'''simple docstring'''
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl"):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpta("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt"):
    set_seed(42)

    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner
def finetune(model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt"):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()

    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)

    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)

            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpta, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpta, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt",
    )
if __name__ == "__main__":
main()
| 178
|
'''simple docstring'''
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class TaFilmDecoder(ModelMixin, ConfigMixin):
    '''simple docstring'''

    @register_to_config
    def __init__(self, input_dims: int = 128, targets_length: int = 256, max_decoder_noise_time: float = 2000.0, d_model: int = 768, num_layers: int = 12, num_heads: int = 12, d_kv: int = 64, d_ff: int = 2048, dropout_rate: float = 0.1):
        super().__init__()

        self.conditioning_emb = nn.Sequential(
            nn.Linear(d_model, d_model * 4, bias=False),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model * 4, bias=False),
            nn.SiLU(),
        )

        self.position_encoding = nn.Embedding(targets_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.continuous_inputs_projection = nn.Linear(input_dims, d_model, bias=False)
        self.dropout = nn.Dropout(p=dropout_rate)

        self.decoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            # FiLM conditional T5 decoder
            lyr = DecoderLayer(d_model=d_model, d_kv=d_kv, num_heads=num_heads, d_ff=d_ff, dropout_rate=dropout_rate)
            self.decoders.append(lyr)

        self.decoder_norm = TaLayerNorm(d_model)
        self.post_dropout = nn.Dropout(p=dropout_rate)
        self.spec_out = nn.Linear(d_model, input_dims, bias=False)
    def encoder_decoder_mask(self, query_input, key_input):
        """simple docstring"""
        mask = torch.mul(query_input.unsqueeze(-1), key_input.unsqueeze(-2))
        return mask.unsqueeze(-3)
    def forward(self, encodings_and_masks, decoder_input_tokens, decoder_noise_time):
        """simple docstring"""
        batch, _, _ = decoder_input_tokens.shape
        assert decoder_noise_time.shape == (batch,)

        # decoder_noise_time is in [0, 1), so rescale to expected timing range.
        time_steps = get_timestep_embedding(
            decoder_noise_time * self.config.max_decoder_noise_time,
            embedding_dim=self.config.d_model,
            max_period=self.config.max_decoder_noise_time,
        ).to(dtype=self.dtype)

        conditioning_emb = self.conditioning_emb(time_steps).unsqueeze(1)

        assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)

        seq_length = decoder_input_tokens.shape[1]

        # If we want to use relative positions for audio context, we can just offset
        # this sequence by the length of encodings_and_masks.
        decoder_positions = torch.broadcast_to(
            torch.arange(seq_length, device=decoder_input_tokens.device), (batch, seq_length),
        )

        position_encodings = self.position_encoding(decoder_positions)
        inputs = self.continuous_inputs_projection(decoder_input_tokens)
        inputs += position_encodings
        y = self.dropout(inputs)

        # decoder: No padding present.
        decoder_mask = torch.ones(
            decoder_input_tokens.shape[:2], device=decoder_input_tokens.device, dtype=inputs.dtype
        )

        # Translate encoding masks to encoder-decoder masks.
        encodings_and_encdec_masks = [(x, self.encoder_decoder_mask(decoder_mask, y)) for x, y in encodings_and_masks]

        # cross attend style: concat encodings
        encoded = torch.cat([x[0] for x in encodings_and_encdec_masks], dim=1)
        encoder_decoder_mask = torch.cat([x[1] for x in encodings_and_encdec_masks], dim=-1)

        for lyr in self.decoders:
            y = lyr(
                y, conditioning_emb=conditioning_emb, encoder_hidden_states=encoded, encoder_attention_mask=encoder_decoder_mask,
            )[0]

        y = self.decoder_norm(y)
        y = self.post_dropout(y)

        spec_out = self.spec_out(y)
        return spec_out
class DecoderLayer(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, d_ff, dropout_rate, layer_norm_epsilon=1E-6):
        super().__init__()
        self.layer = nn.ModuleList()

        # cond self attention: layer 0
        self.layer.append(
            TaLayerSelfAttentionCond(d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate)
        )

        # cross attention: layer 1
        self.layer.append(
            TaLayerCrossAttention(
                d_model=d_model, d_kv=d_kv, num_heads=num_heads, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon,
            )
        )

        # Film Cond MLP + dropout: last layer
        self.layer.append(
            TaLayerFFCond(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate, layer_norm_epsilon=layer_norm_epsilon)
        )

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, encoder_decoder_position_bias=None):
        hidden_states = self.layer[0](
            hidden_states, conditioning_emb=conditioning_emb, attention_mask=attention_mask,
        )

        if encoder_hidden_states is not None:
            encoder_extended_attention_mask = torch.where(encoder_attention_mask > 0, 0, -1E10).to(
                encoder_hidden_states.dtype
            )

            hidden_states = self.layer[1](
                hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_extended_attention_mask,
            )

        # Apply Film Conditional Feed Forward layer
        hidden_states = self.layer[-1](hidden_states, conditioning_emb)

        return (hidden_states,)
class TaLayerSelfAttentionCond(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, dropout_rate):
        super().__init__()
        self.layer_norm = TaLayerNorm(d_model)
        self.FiLMLayer = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None, attention_mask=None):
        # pre-self-attention layer norm, optionally FiLM-conditioned
        normed_hidden_states = self.layer_norm(hidden_states)

        if conditioning_emb is not None:
            normed_hidden_states = self.FiLMLayer(normed_hidden_states, conditioning_emb)

        # Self-attention block
        attention_output = self.attention(normed_hidden_states)

        hidden_states = hidden_states + self.dropout(attention_output)

        return hidden_states
class TaLayerCrossAttention(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_kv, num_heads, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.attention = Attention(query_dim=d_model, heads=num_heads, dim_head=d_kv, out_bias=False, scale_qk=False)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, key_value_states=None, attention_mask=None):
        normed_hidden_states = self.layer_norm(hidden_states)
        attention_output = self.attention(
            normed_hidden_states, encoder_hidden_states=key_value_states, attention_mask=attention_mask.squeeze(1),
        )
        layer_output = hidden_states + self.dropout(attention_output)
        return layer_output
class TaLayerFFCond(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_ff, dropout_rate, layer_norm_epsilon):
        super().__init__()
        self.DenseReluDense = TaDenseGatedActDense(d_model=d_model, d_ff=d_ff, dropout_rate=dropout_rate)
        self.film = TaFiLMLayer(in_features=d_model * 4, out_features=d_model)
        self.layer_norm = TaLayerNorm(d_model, eps=layer_norm_epsilon)
        self.dropout = nn.Dropout(dropout_rate)

    def forward(self, hidden_states, conditioning_emb=None):
        forwarded_states = self.layer_norm(hidden_states)
        if conditioning_emb is not None:
            forwarded_states = self.film(forwarded_states, conditioning_emb)

        forwarded_states = self.DenseReluDense(forwarded_states)
        hidden_states = hidden_states + self.dropout(forwarded_states)
        return hidden_states
class TaDenseGatedActDense(nn.Module):
    '''simple docstring'''

    def __init__(self, d_model, d_ff, dropout_rate):
        super().__init__()
        self.wi_0 = nn.Linear(d_model, d_ff, bias=False)
        self.wi_1 = nn.Linear(d_model, d_ff, bias=False)
        self.wo = nn.Linear(d_ff, d_model, bias=False)
        self.dropout = nn.Dropout(dropout_rate)
        self.act = NewGELUActivation()

    def forward(self, hidden_states):
        # gated feed-forward: GELU branch gates the linear branch
        hidden_gelu = self.act(self.wi_0(hidden_states))
        hidden_linear = self.wi_1(hidden_states)
        hidden_states = hidden_gelu * hidden_linear
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.wo(hidden_states)
        return hidden_states
class TaLayerNorm(nn.Module):
    '''simple docstring'''

    def __init__(self, hidden_size, eps=1E-6):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.variance_epsilon = eps

    def forward(self, hidden_states):
        # T5-style RMSNorm: only scales, no mean subtraction or bias
        variance = hidden_states.to(torch.float32).pow(2).mean(-1, keepdim=True)
        hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)

        # convert into half-precision if necessary
        if self.weight.dtype in [torch.float16, torch.bfloat16]:
            hidden_states = hidden_states.to(self.weight.dtype)

        return self.weight * hidden_states
class NewGELUActivation(nn.Module):
    '''simple docstring'''

    def forward(self, input: torch.Tensor) -> torch.Tensor:
        # tanh approximation of GELU, as used by Google in GPT/T5 codebases
        return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (input + 0.04_47_15 * torch.pow(input, 3.0))))
class TaFiLMLayer(nn.Module):
    '''simple docstring'''

    def __init__(self, in_features, out_features):
        super().__init__()
        self.scale_bias = nn.Linear(in_features, out_features * 2, bias=False)

    def forward(self, x, conditioning_emb):
        emb = self.scale_bias(conditioning_emb)
        scale, shift = torch.chunk(emb, 2, -1)
        x = x * (1 + scale) + shift
        return x
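# Added note: TaFiLMLayer implements feature-wise linear modulation (FiLM). The
# conditioning embedding is projected to a per-channel (scale, shift) pair and
# applied as x * (1 + scale) + shift, so an all-zero projection leaves the
# activations unchanged, which is a common initialization-friendly parameterization.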
| 178
| 1
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError('''The value of intensity cannot be negative''')
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError('''In Malus Law, the angle is in the range 0-360 degrees''')
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
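# Added example: at 60 degrees the transmitted intensity is cos^2(60 deg) = 1/4
# of the incident intensity, so (up to floating-point error):
#
#   malus_law(100.0, 60.0)  # -> ~25.0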
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
| 434
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """sentencepiece.bpe.model""", """monolingual_vocab_file""": """dict.txt"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model""",
    },
    """monolingual_vocab_file""": {
        """vinai/bartpho-syllable""": """https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""vinai/bartpho-syllable""": 1_024}
class BartphoTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs=None, **kwargs) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )

        self.vocab_file = vocab_file
        self.monolingual_vocab_file = monolingual_vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))

        # Load the reduced vocab
        # Keep order of special tokens for backward compatibility
        self.fairseq_tokens_to_ids = {}
        cnt = 0
        for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
            if str(token) not in self.fairseq_tokens_to_ids:
                self.fairseq_tokens_to_ids[str(token)] = cnt
                cnt += 1
        with open(monolingual_vocab_file, 'r', encoding='utf-8') as f:
            for line in f.readlines():
                token = line.strip().split()[0]
                self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids)
        if str(mask_token) not in self.fairseq_tokens_to_ids:
            self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_ids_to_tokens)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        else:
            return self.unk_token_id

    def _convert_id_to_token(self, index):
        return self.fairseq_ids_to_tokens[index]

    def convert_tokens_to_string(self, tokens):
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        out_monolingual_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'],
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath(
            out_monolingual_vocab_file
        ) and os.path.isfile(self.monolingual_vocab_file):
            copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file)
        elif not os.path.isfile(self.monolingual_vocab_file):
            with open(out_monolingual_vocab_file, 'w', encoding='utf-8') as fp:
                for token in self.fairseq_tokens_to_ids:
                    if token not in self.all_special_tokens:
                        fp.write(f'''{str(token)} \n''')

        return out_vocab_file, out_monolingual_vocab_file
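# Added usage sketch (assumes the vinai/bartpho-syllable files can be downloaded):
#
#   tokenizer = BartphoTokenizer.from_pretrained("vinai/bartpho-syllable")
#   ids = tokenizer("Chúng tôi là những nghiên cứu viên.")["input_ids"]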
| 559
| 0
|
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids, pad_token_id, decoder_start_token_id):
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)

    shifted_input_ids = jnp.where(shifted_input_ids == -1_00, pad_token_id, shifted_input_ids)
    return shifted_input_ids
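# Added example: with pad_token_id=0 and decoder_start_token_id=0, the sequence
# is shifted one position to the right, and any -100 label would become the pad id:
#
#   shift_tokens_right(jnp.array([[5, 6, 7]]), 0, 0)  # -> [[0, 5, 6]]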
class FlaxMTaModel(FlaxTaModel):
    '''simple docstring'''

    model_type = 'mt5'
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    '''simple docstring'''

    model_type = 'mt5'
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    '''simple docstring'''

    model_type = 'mt5'
    config_class = MTaConfig
| 701
|
import torch
def main():
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F'Successfully ran on {num_gpus} GPUs')
if __name__ == "__main__":
main()
| 413
| 0
|
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
"""simple docstring"""
def __init__( self ) -> List[str]:
super().__init__()
__a : Any = nn.Parameter(torch.randn(1 ) )
__a : str = nn.Parameter(torch.randn(1 ) )
    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
"""simple docstring"""
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, 'initial')
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, 'checkpoint')
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()
            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)
            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_1'))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue('Item at index 0' in message)
        self.assertTrue('Item at index 1' in message)
        self.assertFalse('Item at index 2' in message)
        self.assertFalse('Item at index 3' in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0'))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_0')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_9')))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, 'checkpoints', 'checkpoint_10')))
@require_cuda
    def test_map_location(self):
        cmd = ['torchrun', f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = '/tmp/accelerate/state_checkpointing'
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
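# A minimal sketch of the save/load round-trip the tests above exercise. The
# path and hyperparameters are illustrative and the helper is ours, not part
# of the original test file; it is never called on import.
def _demo_checkpoint_roundtrip(tmpdir="/tmp/accelerate/demo_ckpt"):
    accelerator = Accelerator(project_dir=tmpdir)
    net = DummyModel()
    opt = torch.optim.Adam(net.parameters(), lr=1e-3)
    net, opt = accelerator.prepare(net, opt)
    accelerator.save_state(tmpdir)  # writes model/optimizer/RNG states
    accelerator.load_state(tmpdir)  # restores them in place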
| 597
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'YituTech/conv-bert-base': 5_1_2,
'YituTech/conv-bert-medium-small': 5_1_2,
'YituTech/conv-bert-small': 5_1_2,
}
PRETRAINED_INIT_CONFIGURATION = {
'YituTech/conv-bert-base': {'do_lower_case': True},
'YituTech/conv-bert-medium-small': {'do_lower_case': True},
'YituTech/conv-bert-small': {'do_lower_case': True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , do_lower_case ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , strip_accents ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("""type""" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ) -> List[int]:
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
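# Worked example of the segment-id layout produced above (hypothetical token
# ids; the helper is ours, not part of the original module): for a pair (A, B)
# the mask is 0 over [CLS] A [SEP] and 1 over B [SEP].
def _demo_token_type_ids():
    token_ids_0 = [5, 6, 7]  # sentence A
    token_ids_1 = [8, 9]     # sentence B
    cls, sep = [101], [102]  # placeholder special-token ids
    mask = len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    assert mask == [0, 0, 0, 0, 0, 1, 1, 1]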
| 601
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = """falcon"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    def __init__( self , vocab_size=65024 , hidden_size=4544 , num_hidden_layers=32 , num_attention_heads=71 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , hidden_dropout=0.0 , attention_dropout=0.0 , num_kv_heads=None , alibi=False , new_decoder_architecture=False , multi_query=True , parallel_attn=True , bias=False , bos_token_id=11 , eos_token_id=11 , **kwargs , ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop('''n_embed''' , None )
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )

    @property
    def head_dim( self ):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary( self ):
        return not self.alibi
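# A small sanity sketch (the helper is ours, not part of the original module):
# with the default geometry above, each attention head is 4544 / 71 = 64 dims
# wide, and rotary embeddings are active because `alibi` defaults to False.
def _demo_falcon_defaults():
    config = FalconConfig()
    assert config.head_dim == 64
    assert config.rotary is True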
| 716
|
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '''-m''' , '''--pretrained_model_name_or_path''' , type=str , default=None , required=True , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , )
    parser.add_argument(
        '''-c''' , '''--caption''' , type=str , default='''robotic cat with wings''' , help='''Text used to generate images.''' , )
    parser.add_argument(
        '''-n''' , '''--images_num''' , type=int , default=4 , help='''How many images to generate.''' , )
    parser.add_argument(
        '''-s''' , '''--seed''' , type=int , default=42 , help='''Seed for random process.''' , )
    parser.add_argument(
        '''-ci''' , '''--cuda_id''' , type=int , default=0 , help='''cuda_id.''' , )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError('''The specified number of rows and columns are not correct.''')
    w, h = imgs[0].size
    grid = Image.new('''RGB''', size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps, generator=generator, num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder='tokenizer')
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='text_encoder')
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder='vae')
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder='unet')
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, 'best_model.pt')):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, 'unet', unet)
else:
    unet = unet.to(torch.device('cuda', args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, '{}.png'.format('_'.join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, '_'.join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, '{}.png'.format(idx + 1)))
| 346
| 0
|
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'/attention/': '/0/SelfAttention/',
'/self_attention/': '/0/SelfAttention/',
'/encoder_decoder_attention/': '/1/EncDecAttention/',
'value': 'v',
'query': 'q',
'key': 'k',
'out': 'o',
'pre_self_attention_layer_norm': '0/layer_norm',
'pre_cross_attention_layer_norm': '1/layer_norm',
'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong
'token_embedder': 'shared',
'encoder_norm': 'final_layer_norm',
'decoder_norm': 'final_layer_norm',
'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight',
'router/router_weights/w/': 'router/classifier/',
'roer/roer_weights/w/': 'router/classifier/',
'logits_dense': 'lm_head',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
    # the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_pattern = R'.*/layers_(\d+)'
        new_key = key
        if re.match(layer_pattern, key):
            new_key = re.sub(R'layers_(\d+)', R'block/\1/layer', new_key)

        prefix_pattern = R'(encoder|decoder)\/'
        if re.match(prefix_pattern, key):
            groups = re.match(prefix_pattern, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(R'/mlp/', R'/1/mlp/', new_key)
                new_key = re.sub(R'/pre_mlp_layer_norm/', R'/1/layer_norm/', new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(R'/mlp/', R'/2/mlp/', new_key)
                new_key = re.sub(R'/pre_mlp_layer_norm/', R'/2/layer_norm/', new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"""{key} -> {new_key}""")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
        ].T

    # 3. Take extra care of the EXPERTS layer
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"""{key} -> {key.replace('expert/', 'nested fstring')}""")
            s_dict.pop(key)
    return s_dict
GIN_TO_CONFIG_MAPPING = {
'NUM_ENCODER_LAYERS': 'num_layers',
'NUM_DECODER_LAYERS': 'num_decoder_layers',
'NUM_HEADS': 'num_heads',
'HEAD_DIM': 'd_kv',
'EMBED_DIM': 'd_model',
'MLP_DIM': 'd_ff',
'NUM_SELECTED_EXPERTS': 'num_selected_experts',
'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers',
'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers',
'dense.MlpBlock.activations': 'feed_forward_proj',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google style config to the hugging face format
    import regex as re

    with open(gin_file, 'r') as f:
        raw_gin = f.read()
    regex_match = re.findall(R'(.*) = ([0-9.]*)', raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if '.' in value else int(value)

    activation = re.findall(R"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])
    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
def convert_flax_checkpoint_to_pytorch(flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8):
    # Initialise PyTorch model
    print(f"""Loading flax weights from : {flax_checkpoint_path}""")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)
    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)
    pt_model = SwitchTransformersForConditionalGeneration(config)
    flax_params = flax_params['target']
    flax_params = flatten_dict(flax_params, sep='/')
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep='/')
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--switch_t5x_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'
' model architecture. If not provided, a `gin_file` has to be provided.'
),
)
parser.add_argument(
'--gin_file',
default=None,
type=str,
required=False,
help='Path to the gin config file. If not provided, a `config_file` has to be passed ',
)
parser.add_argument(
'--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.'
)
parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts')
    args = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
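# Illustrative walk-through of the two regex passes in rename_keys (the sample
# key and helper are ours, made up for demonstration): layers_{x} becomes
# block/{x}/layer, and encoder MLP weights are nested under sub-layer 1.
def _demo_rename_key():
    key = "encoder/layers_3/mlp/wi/kernel"
    new_key = re.sub(R"layers_(\d+)", R"block/\1/layer", key)
    new_key = re.sub(R"/mlp/", R"/1/mlp/", new_key)
    assert new_key == "encoder/block/3/layer/1/mlp/wi/kernel"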
| 562
|
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    '''simple docstring'''

    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about a document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]
    def __init__( self , *args , **kwargs ):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
        super().__init__(*args , **kwargs )

    def encode( self , document: "Image" , question: str ):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}' , question )
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt , add_special_tokens=False , return_tensors='pt' ).input_ids
        pixel_values = self.pre_processor(document , return_tensors='pt' ).pixel_values
        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward( self , inputs ):
        return self.model.generate(
            inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=True , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=True , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=True , ).sequences

    def decode( self , outputs ):
        sequence = self.pre_processor.batch_decode(outputs )[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
        sequence = re.sub(R'<.*?>' , '' , sequence , count=1 ).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence )
        return sequence["answer"]
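# Hypothetical usage sketch (the helper, file name, and question are ours and
# illustrative only; running it needs Pillow plus network access to download
# the default checkpoint). PipelineTool instances are callable.
def _demo_document_qa():
    from PIL import Image

    tool = DocumentQuestionAnsweringTool()
    document = Image.open("invoice.png")
    print(tool(document, "What is the total amount?"))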
| 562
| 1
|
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""" )
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""" )
@pytest.mark.parametrize("""path""" , ["""accuracy"""] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + """.py"""
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
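# Quick interactive sketch of the helpers the tests above cover (the helper is
# ours; expected values are taken from the parametrized cases; needs network
# access to the Hub).
def _demo_inspect_helpers():
    assert "plain_text" in get_dataset_config_names("squad")
    assert get_dataset_split_names("squad", config_name="plain_text") == ["train", "validation"]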
| 716
|
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = """summarization"""
    loss_names = ["""loss"""]
    metric_names = ROUGE_KEYS
    default_val_metric = """rouge2"""
    def __init__(self, hparams, **kwargs):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("""Dynamic Batch size does not work for multi-gpu training""")
            if hparams.sortish_sampler:
                raise ValueError("""--sortish_sampler and --max_tokens_per_batch may not be used simultaneously""")
        super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs)
        use_task_specific_params(self.model, """summarization""")
        save_git_info(self.hparams.output_dir)
        self.metrics_save_path = Path(self.output_dir) / """metrics.json"""
        self.hparams_save_path = Path(self.output_dir) / """hparams.pkl"""
        pickle_save(self.hparams, self.hparams_save_path)
        self.step_count = 0
        self.metrics = defaultdict(list)
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == """fsmt""" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            """train""": self.hparams.n_train,
            """val""": self.hparams.n_val,
            """test""": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            """train""": self.hparams.max_target_length,
            """val""": self.hparams.val_max_target_length,
            """test""": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], F'''target_lens: {self.target_lens}'''
        assert self.target_lens["train"] <= self.target_lens["test"], F'''target_lens: {self.target_lens}'''
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model)
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())
        self.hparams.git_sha = get_git_info()["""repo_sha"""]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            SeqaSeqDataset if hasattr(self.tokenizer, """prepare_seq2seq_batch""") else LegacySeqaSeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]:
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist()) if """mask""" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch, Path(self.output_dir) / """text_batch.json""")
        save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / """tok_batch.json""")
        self.already_saved_batch = True
        return readable_batch
    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)
    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        return lmap(str.strip, gen_text)
    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["""input_ids"""], batch["""attention_mask"""]
        tgt_ids = batch["""labels"""]
        if isinstance(self.model, T5ForConditionalGeneration):
            decoder_input_ids = self.model._shift_right(tgt_ids)
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id)
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch)
        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs["""logits"""]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id)
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1))
        else:
            lprobs = nn.functional.log_softmax(lm_logits, dim=-1)
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id)
        return (loss,)
@property
    def pad(self) -> int:
        return self.tokenizer.pad_token_id
    def training_step(self, batch, batch_idx) -> Dict:
        loss_tensors = self._step(batch)
        logs = dict(zip(self.loss_names, loss_tensors))
        # tokens per batch
        logs["tpb"] = batch["""input_ids"""].ne(self.pad).sum() + batch["""labels"""].ne(self.pad).sum()
        logs["bs"] = batch["""input_ids"""].shape[0]
        logs["src_pad_tok"] = batch["""input_ids"""].eq(self.pad).sum()
        logs["src_pad_frac"] = batch["""input_ids"""].eq(self.pad).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step(self, batch, batch_idx) -> Dict:
        return self._generative_step(batch)
    def validation_epoch_end(self, outputs, prefix="val") -> Dict:
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names}
        loss = losses["""loss"""]
        generative_metrics = {
            k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["""gen_time""", """gen_len"""]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss)
        generative_metrics.update({k: v.item() for k, v in losses.items()})
        losses.update(generative_metrics)
        all_metrics = {F'''{prefix}_avg_{k}''': x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics)  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["""preds"""] for x in outputs])
        return {
            "log": all_metrics,
            "preds": preds,
            F'''{prefix}_loss''': loss,
            F'''{prefix}_{self.val_metric}''': metric_tensor,
        }
    def calc_generative_metrics(self, preds, target) -> Dict:
        return calculate_rouge(preds, target)
    def _generative_step(self, batch: dict) -> dict:
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["""input_ids"""], attention_mask=batch["""attention_mask"""], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, )
        gen_time = (time.time() - t0) / batch["""input_ids"""].shape[0]
        preds: List[str] = self.ids_to_clean_text(generated_ids)
        target: List[str] = self.ids_to_clean_text(batch["""labels"""])
        loss_tensors = self._step(batch)
        base_metrics = dict(zip(self.loss_names, loss_tensors))
        rouge: Dict = self.calc_generative_metrics(preds, target)
        summ_len = np.mean(lmap(len, generated_ids))
        base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge)
        return base_metrics
    def test_step(self, batch, batch_idx):
        return self._generative_step(batch)
    def test_epoch_end(self, outputs):
        return self.validation_epoch_end(outputs, prefix="""test""")
    def get_dataset(self, type_path) -> SeqaSeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, )
        return dataset
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        dataset = self.get_dataset(type_path)
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1)
            return DataLoader(
                dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers, )
        else:
            return DataLoader(
                dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, )
    def train_dataloader(self) -> DataLoader:
        dataloader = self.get_dataloader("""train""", batch_size=self.hparams.train_batch_size, shuffle=True)
        return dataloader

    def val_dataloader(self) -> DataLoader:
        return self.get_dataloader("""val""", batch_size=self.hparams.eval_batch_size)

    def test_dataloader(self) -> DataLoader:
        return self.get_dataloader("""test""", batch_size=self.hparams.eval_batch_size)
@staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        add_generic_args(parser, root_dir)
        parser.add_argument(
            """--max_source_length""", default=1024, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ), )
        parser.add_argument(
            """--max_target_length""", default=56, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ), )
        parser.add_argument(
            """--val_max_target_length""", default=142, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ), )
        parser.add_argument(
            """--test_max_target_length""", default=142, type=int, help=(
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            ), )
        parser.add_argument("""--freeze_encoder""", action="""store_true""")
        parser.add_argument("""--freeze_embeds""", action="""store_true""")
        parser.add_argument("""--sortish_sampler""", action="""store_true""", default=False)
        parser.add_argument("""--overwrite_output_dir""", action="""store_true""", default=False)
        parser.add_argument("""--max_tokens_per_batch""", type=int, default=None)
        parser.add_argument("""--logger_name""", type=str, choices=["""default""", """wandb""", """wandb_shared"""], default="""default""")
        parser.add_argument("""--n_train""", type=int, default=-1, required=False, help="""# examples. -1 means use all.""")
        parser.add_argument("""--n_val""", type=int, default=500, required=False, help="""# examples. -1 means use all.""")
        parser.add_argument("""--n_test""", type=int, default=-1, required=False, help="""# examples. -1 means use all.""")
        parser.add_argument(
            """--task""", type=str, default="""summarization""", required=False, help="""# examples. -1 means use all.""")
        parser.add_argument("""--label_smoothing""", type=float, default=0.0, required=False)
        parser.add_argument("""--src_lang""", type=str, default="""""", required=False)
        parser.add_argument("""--tgt_lang""", type=str, default="""""", required=False)
        parser.add_argument("""--eval_beams""", type=int, default=None, required=False)
        parser.add_argument(
            """--val_metric""", type=str, default=None, required=False, choices=["""bleu""", """rouge2""", """loss""", None])
        parser.add_argument("""--eval_max_gen_length""", type=int, default=None, help="""never generate more than n tokens""")
        parser.add_argument("""--save_top_k""", type=int, default=1, required=False, help="""How many checkpoints to save""")
        parser.add_argument(
            """--early_stopping_patience""", type=int, default=-1, required=False, help=(
                """-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"""
                """ val_check_interval will effect it."""
            ), )
        return parser
class TranslationModule(SummarizationModule):
    mode = """translation"""
    loss_names = ["""loss"""]
    metric_names = ["""bleu"""]
    default_val_metric = """bleu"""

    def __init__(self, hparams, **kwargs):
        super().__init__(hparams, **kwargs)
        self.dataset_kwargs["src_lang"] = hparams.src_lang
        self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang

    def calc_generative_metrics(self, preds, target) -> dict:
        return calculate_bleu(preds, target)
def main(args, model=None) -> SummarizationModule:
    Path(args.output_dir).mkdir(exist_ok=True)
    check_output_dir(args, expected_items=3)
    if model is None:
        if "summarization" in args.task:
            model: SummarizationModule = SummarizationModule(args)
        else:
            model: SummarizationModule = TranslationModule(args)
    dataset = Path(args.data_dir).name
    if (
        args.logger_name == "default"
        or args.fast_dev_run
        or str(args.output_dir).startswith("""/tmp""")
        or str(args.output_dir).startswith("""/var""")
    ):
        logger = True  # don't pollute wandb logs unnecessarily
    elif args.logger_name == "wandb":
        from pytorch_lightning.loggers import WandbLogger

        project = os.environ.get("""WANDB_PROJECT""", dataset)
        logger = WandbLogger(name=model.output_dir.name, project=project)
    elif args.logger_name == "wandb_shared":
        from pytorch_lightning.loggers import WandbLogger

        logger = WandbLogger(name=model.output_dir.name, project=F'''hf_{dataset}''')
    if args.early_stopping_patience >= 0:
        es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience)
    else:
        es_callback = False

    lower_is_better = args.val_metric == """loss"""
    trainer: pl.Trainer = generic_train(
        model, args, logging_callback=SeqaSeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback(
            args.output_dir, model.val_metric, args.save_top_k, lower_is_better), early_stopping_callback=es_callback, logger=logger, )
    pickle_save(model.hparams, model.output_dir / """hparams.pkl""")
    if not args.do_predict:
        return model

    model.hparams.test_checkpoint = """"""
    checkpoints = sorted(glob.glob(os.path.join(args.output_dir, """*.ckpt"""), recursive=True))
    if checkpoints:
        model.hparams.test_checkpoint = checkpoints[-1]
        trainer.resume_from_checkpoint = checkpoints[-1]
    trainer.logger.log_hyperparams(model.hparams)
    # test() without a model tests using the best checkpoint automatically
    trainer.test()
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser = pl.Trainer.add_argparse_args(parser)
    parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()
main(args)
| 478
| 0
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/data2vec-vision-base-ft": (
        "https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"
    ),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = 'data2vec-vision'

    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , use_mask_token=False , use_absolute_position_embeddings=False , use_relative_position_bias=False , use_shared_relative_position_bias=False , layer_scale_init_value=0.1 , drop_path_rate=0.1 , use_mean_pooling=True , out_indices=[3, 5, 7, 11] , pool_scales=[1, 2, 3, 6] , use_auxiliary_head=True , auxiliary_loss_weight=0.4 , auxiliary_channels=256 , auxiliary_num_convs=1 , auxiliary_concat_input=False , semantic_loss_ignore_index=255 , **kwargs , ):
        super().__init__(**kwargs )

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11' )

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation( self ) -> float:
        return 1e-4
| 510
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}


class MegatronBertConfig(PretrainedConfig):
    model_type = 'megatron-bert'

    def __init__( self , vocab_size=29056 , hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , intermediate_size=4096 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 1
|
"""simple docstring"""
from math import ceil
def solution(n = 1001 ) -> int:
    """simple docstring"""
    total = 1
    for i in range(1 , int(ceil(n / 2.0 ) ) ):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
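# Cross-check sketch (this helper is ours, not part of the original script):
# enumerate the four corners of every odd-sided ring directly instead of using
# the closed-form update above; the centre cell contributes 1. The corners of
# the ring with side s are s^2, s^2-(s-1), s^2-2(s-1), s^2-3(s-1).
def solution_bruteforce(n: int = 1001) -> int:
    total = 1
    for side in range(3, n + 1, 2):
        total += sum(side**2 - k * (side - 1) for k in range(4))
    return total


assert solution_bruteforce(5) == 101  # the known 5x5 spiral diagonal sum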
| 713
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = """roformer"""

    def __init__( self , vocab_size=50000 , embedding_size=None , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1536 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , rotary_value=False , use_cache=True , **kwargs , ):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class _lowerCAmelCase ( a ):
"""simple docstring"""
@property
def snake_case ( self ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ :Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase__ :List[str] = {0: 'batch', 1: 'sequence'}
lowerCAmelCase__ :Union[str, Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
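

if __name__ == "__main__":
    # Hedged usage sketch (illustrative only, not part of the original module):
    # with the defaults above, `embedding_size` falls back to `hidden_size`.
    config = RoFormerConfig()
    assert config.model_type == "roformer"
    assert config.embedding_size == config.hidden_size == 768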
| 560
| 0
|
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:  # uppercase A-Z
            output += chr(155 - extract)
        elif 97 <= extract <= 122:  # lowercase a-z
            output += chr(219 - extract)
        else:
            output += i
    return output


def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )


def benchmark() -> None:
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")


if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
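
    # Hedged examples (added for illustration): Atbash maps a<->z, b<->y, ...,
    # so the cipher is its own inverse.
    assert atbash("abcdef") == "zyxwvu"
    assert atbash(atbash("Hello, World!")) == "Hello, World!"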
| 100
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 100
| 1
|
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester:
    def __init__(self, parent):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.mem_len = 30
        self.key_length = self.seq_length + self.mem_len
        self.clamp_len = 15
        self.is_training = True
        self.use_labels = True
        self.vocab_size = 99
        self.cutoffs = [10, 50, 80]
        self.hidden_size = 32
        self.d_embed = 32
        self.num_attention_heads = 4
        self.d_head = 8
        self.d_inner = 128
        self.div_val = 2
        self.num_hidden_layers = 2
        self.scope = None
        self.seed = 1
        self.eos_token_id = 0
        self.num_labels = 3
        self.pad_token_id = self.vocab_size - 1
        self.init_range = 0.01

    def prepare_config_and_inputs(self):
        """simple docstring"""
        input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lm_labels = None
        if self.use_labels:
            lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = TransfoXLConfig(
            vocab_size=self.vocab_size,
            mem_len=self.mem_len,
            clamp_len=self.clamp_len,
            cutoffs=self.cutoffs,
            d_model=self.hidden_size,
            d_embed=self.d_embed,
            n_head=self.num_attention_heads,
            d_head=self.d_head,
            d_inner=self.d_inner,
            div_val=self.div_val,
            n_layer=self.num_hidden_layers,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.vocab_size - 1,
            init_range=self.init_range,
            num_labels=self.num_labels,
        )
        return (config, input_ids_1, input_ids_2, lm_labels)

    def set_seed(self):
        """simple docstring"""
        random.seed(self.seed)
        tf.random.set_seed(self.seed)

    def create_and_check_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels):
        """simple docstring"""
        model = TFTransfoXLModel(config)
        hidden_states_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1}
        hidden_states_2, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(hidden_states_1.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(hidden_states_2.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels):
        """simple docstring"""
        model = TFTransfoXLLMHeadModel(config)
        lm_logits_1, mems_1 = model(input_ids_1).to_tuple()
        inputs = {"input_ids": input_ids_1, "labels": lm_labels}
        _, mems_1 = model(inputs).to_tuple()
        lm_logits_2, mems_2 = model([input_ids_2, mems_1]).to_tuple()
        inputs = {"input_ids": input_ids_2, "mems": mems_1, "labels": lm_labels}
        _, mems_2 = model(inputs).to_tuple()
        self.parent.assertEqual(lm_logits_1.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_1],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )
        self.parent.assertEqual(lm_logits_2.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertListEqual(
            [mem.shape for mem in mems_2],
            [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers,
        )

    def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels):
        """simple docstring"""
        model = TFTransfoXLForSequenceClassification(config)
        result = model(input_ids_1)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids_1}
        return config, inputs_dict
@require_tf
class TFTransfoXLModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    all_generative_model_classes = () if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": TFTransfoXLModel,
            "text-classification": TFTransfoXLForSequenceClassification,
            "text-generation": TFTransfoXLLMHeadModel,
            "zero-shot": TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    test_mismatched_shapes = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        """simple docstring"""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False

    def setUp(self):
        """simple docstring"""
        self.model_tester = TFTransfoXLModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()

    def test_transfo_xl_model(self):
        """simple docstring"""
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*config_and_inputs)

    def test_transfo_xl_lm_head(self):
        """simple docstring"""
        self.model_tester.set_seed()
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*config_and_inputs)

    def test_transfo_xl_sequence_classification_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs)

    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None

    def test_xla_mode(self):
        """simple docstring"""
        pass

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFTransfoXLModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="This model doesn't play well with fit() due to not returning a single loss.")
    def test_dataset_conversion(self):
        """simple docstring"""
        pass
@require_tf
class TFTransfoXLModelLanguageGenerationTest(unittest.TestCase):
    @unittest.skip("Skip test until #12651 is resolved.")
    @slow
    def test_lm_generate_transfo_xl_wt103(self):
        """simple docstring"""
        model = TFTransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
        # fmt: off
        input_ids = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]], dtype=tf.int32)  # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
        expected_output_ids = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0]  # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        output_ids = model.generate(input_ids, max_length=200, do_sample=False)
        self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
| 714
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask (the zeroed-out slice below is a reconstruction; the original
        # assignment target was lost in extraction)
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_inpaint(self):
        """simple docstring"""
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        """simple docstring"""
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        """simple docstring"""
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        # the zeroed-out slice below is a reconstruction; the original assignment
        # target was lost in extraction
        mask[:250, 250:-250] = 0
        prompt = "a hat"
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 325
| 0
|
"""simple docstring"""
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """solves the multi-process interleaved print problem"""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)
local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()
gpu = f"[{hostname}-{local_rank}]"
try:
# test distributed
dist.init_process_group('''nccl''')
dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
dist.barrier()
# test cuda is available and can allocate memory
torch.cuda.is_available()
torch.ones(1).cuda(local_rank)
# global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()
printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''')
dist.barrier()
if rank == 0:
printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''')
except Exception:
printflock(F'''{gpu} is broken''')
raise
| 450
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 450
| 1
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def replace_keys(state_dict):
    '''simple docstring'''
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)
    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")
        model_state_dict[key] = value
    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    '''simple docstring'''
    checkpoint_path = hf_hub_download(model_hub_id, f"""checkpoints/{model_name}.pth""")
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(
            vision_config=vision_config,
        )
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9712603092193604
    input_boxes = ((75, 275, 1725, 850),)
    inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.8686015605926514
    # Test with 2 points and 1 image.
    input_points = [[[400, 650], [800, 650]]]
    input_labels = [[1, 1]]
    inputs = processor(
        images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
    ).to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
'''--model_name''',
default='''sam_vit_h_4b8939''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
parser.add_argument(
'''--model_hub_id''',
default='''ybelkada/segment-anything''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
    args = parser.parse_args()
    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 455
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''unc-nlp/lxmert-base-uncased''': '''https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json''',
}
class LxmertConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
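

if __name__ == "__main__":
    # Hedged usage sketch (illustrative only, not part of the original module):
    # the layer counts are exposed as a per-component dict rather than a single int.
    config = LxmertConfig()
    assert config.num_hidden_layers == {"vision": 5, "cross_encoder": 5, "language": 9}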
| 455
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path, gpt2_config_file, pytorch_dump_folder_path):
    """simple docstring"""
    # Construct model
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file)
    model = GPT2Model(config)

    # Load weights from numpy
    load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f'Save PyTorch model to {pytorch_weights_dump_path}')
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f'Save configuration file to {pytorch_config_dump_path}')
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--gpt2_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--gpt2_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 433
|
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = 'src/transformers'

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    'transformers',
    os.path.join(PATH_TO_TRANSFORMERS, '__init__.py'),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)')

CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    'CLIPConfigMixin',
    'DecisionTransformerConfigMixin',
    'EncoderDecoderConfigMixin',
    'RagConfigMixin',
    'SpeechEncoderDecoderConfigMixin',
    'VisionEncoderDecoderConfigMixin',
    'VisionTextDualEncoderConfigMixin',
}
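
# Illustrative note (added): on the docstring snippet
# "[bert-base-uncased](https://huggingface.co/bert-base-uncased)",
# `_re_checkpoint.findall` yields
# [("bert-base-uncased", "https://huggingface.co/bert-base-uncased")].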
def check_config_docstrings_have_checkpoints() -> None:
    """simple docstring"""
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False
        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)
        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint
            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f'https://huggingface.co/{ckpt_name}'
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break
        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = """\n""".join(sorted(configs_without_checkpoint))
        raise ValueError(f'The following configurations don\'t contain any valid checkpoint:\n{message}')
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 433
| 1
|
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
    AltDiffusionImg2ImgPipeline,
    AutoencoderKL,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImg2ImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        '''simple docstring'''
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        '''simple docstring'''
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        '''simple docstring'''

        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        '''simple docstring'''
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images
        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        '''simple docstring'''
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        init_image = self.dummy_image.to(torch_device)
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)
        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images
        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        image_slice = image[255:258, 383:386, -1]
        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class AltDiffusionImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        '''simple docstring'''
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )
        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "A fantasy landscape, trending on artstation"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
| 473
|
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    '''simple docstring'''
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 473
| 1
|
def bin_to_octal(bin_string: str) -> str:
    """simple docstring"""
    if not all(char in '01' for char in bin_string):
        raise ValueError('Non-binary value was passed to the function')
    if not bin_string:
        raise ValueError('Empty string was passed to the function')
    oct_string = ''
    while len(bin_string) % 3 != 0:
        bin_string = '0' + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
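
    # Hedged examples (added for illustration): 0b1111 == 0o17 and 0b101010 == 0o52.
    assert bin_to_octal("1111") == "17"
    assert bin_to_octal("101010") == "52"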
| 381
|
"""simple docstring"""
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    '''simple docstring'''

    def __init__(self, path="", prefix="train"):
        '''simple docstring'''
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        '''simple docstring'''
        return len(self.documents)

    def __getitem__(self, idx):
        '''simple docstring'''
        document_path = self.documents[idx]
        document_name = document_path.split('/')[-1]
        with open(document_path, encoding='utf-8') as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split('\n')]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight'):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith('@highlight'), lines))
    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
    if line.startswith('@highlight'):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
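

if __name__ == "__main__":
    # Hedged example (added for illustration): with separator id 102, tokens before
    # the first separator get segment 1 (because -1 % 2 == 1 in Python) and the
    # segment flips at every separator token.
    example_batch = torch.tensor([[5, 102, 6, 7, 102, 8]])
    assert compute_token_type_ids(example_batch, separator_token_id=102).tolist() == [[1, 0, 0, 0, 1, 1]]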
| 255
| 0
|
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(grid, init, goal, cost, heuristic):
    '''simple docstring'''
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])
    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
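
# Illustrative note (added): action[x][y] holds the index into DIRECTIONS of the
# move that first reached (x, y); the reconstruction loop inside `search` walks
# it backwards from the goal and then reverses the collected cells into `path`.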
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99
    path, action = search(grid, init, goal, cost, heuristic)
    print('ACTION MAP')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 231
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 231
| 1
|
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)
    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
| 639
|
from maths.prime_factors import prime_factors


def liouville_lambda(number: int) -> int:
    """Return (-1) ** Omega(number), i.e. Liouville's lambda function."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        raise ValueError("Input must be a positive integer")
    return -1 if len(prime_factors(number)) % 2 else 1
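# Hand-checked examples (added for illustration, not from the original file):
# liouville_lambda(10) == 1   # 10 = 2 * 5 -> even number of prime factors
# liouville_lambda(12) == -1  # 12 = 2 * 2 * 3 -> odd number of prime factors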
if __name__ == "__main__":
import doctest
doctest.testmod()
| 639
| 1
|
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
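# Minimal usage sketch (illustrative values, not from the original file):
# config = UMT5Config(d_model=256, num_layers=4)
# config.hidden_size == config.d_model  # the properties above alias the T5-style names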
| 715
|
from math import atan, cos, radians, sin, tan

from .haversine_distance import haversine_distance

# Constants per WGS84 https://en.wikipedia.org/wiki/World_Geodetic_System
AXIS_A = 6378137.0
AXIS_B = 6356752.314245
EQUATORIAL_RADIUS = 6378137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Distance between two points on the surface of an ellipsoid, in metres."""
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
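# A rough sanity check (coordinates assumed for illustration; result is in metres):
# lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521)
# San Francisco -> Yosemite, on the order of a few hundred kilometres.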
if __name__ == "__main__":
import doctest
doctest.testmod()
| 367
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 55
|
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 419
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """A dataset reader that reads from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
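# Minimal usage sketch (assumes an active SparkSession; names are illustrative,
# not from the original file):
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.getOrCreate()
# df = spark.createDataFrame([("foo", 1), ("bar", 2)], ["text", "label"])
# ds = SparkDatasetReader(df, streaming=False).read()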
| 721
|
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
_SCREAMING_SNAKE_CASE = {"""base""": """patrickvonplaten/wav2vec2_tiny_random""", """robust""": """patrickvonplaten/wav2vec2_tiny_random_robust"""}
_SCREAMING_SNAKE_CASE = """zero2"""
_SCREAMING_SNAKE_CASE = """zero3"""
_SCREAMING_SNAKE_CASE = [ZEROa, ZEROa]
def custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)

    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results, so all we
        # check for now is that the subprocess didn't fail
        pass

    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir

    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
            --model_name_or_path {model_name}
            --dataset_name hf-internal-testing/librispeech_asr_dummy
            --dataset_config_name clean
            --train_split_name validation
            --validation_split_name validation
            --output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 2
            --evaluation_strategy steps
            --learning_rate 5e-4
            --warmup_steps 8
            --orthography timit
            --preprocessing_num_workers 1
            --group_by_length
            --freeze_feature_extractor
            --report_to none
            --save_steps 0
            --eval_steps {eval_steps}
        """.split()

        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())

        return output_dir

    def get_launcher(self, distributed=False):
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 534
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_bridgetower": [
"BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BridgeTowerConfig",
"BridgeTowerTextConfig",
"BridgeTowerVisionConfig",
],
"processing_bridgetower": ["BridgeTowerProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["BridgeTowerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST",
"BridgeTowerForContrastiveLearning",
"BridgeTowerForImageAndTextRetrieval",
"BridgeTowerForMaskedLM",
"BridgeTowerModel",
"BridgeTowerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 366
|
import numpy as np


def sigmoid(vector: np.array) -> np.array:
    return 1 / (1 + np.exp(-vector))


def gaussian_error_linear_unit(vector: np.array) -> np.array:
    # GELU approximated with the sigmoid: x * sigmoid(1.702 * x)
    return vector * sigmoid(1.702 * vector)
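# A quick sanity check (toy input, not part of the original file):
# gaussian_error_linear_unit(np.array([-1.0, 0.0, 1.0]))
# -> roughly [-0.154, 0.0, 0.846]; large positive inputs pass through, large negative ones go to 0.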
if __name__ == "__main__":
import doctest
doctest.testmod()
| 366
| 1
|
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket")
@patch("builtins.open")
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])  # one chunk of data, then EOF
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)

    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 329
|
import torch

from diffusers import StableDiffusionPipeline


model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 329
| 1
|
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        mask_ratio=0.6,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_tf_common.py, as ViTMAE does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)

            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np, noise=noise)
            output_for_kw_input = model(**inputs_np, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make the mask reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

    def test_keras_save_load(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }

            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)

    def test_save_load_config(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)

    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
| 98
|
def __lowerCAmelCase( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
for i in range(_SCREAMING_SNAKE_CASE ):
for j in range(_SCREAMING_SNAKE_CASE ):
if dist[i][j] != float('inf' ):
print(int(dist[i][j] ) , end='\t' )
else:
print('INF' , end='\t' )
print()
def floyd_warshall( graph , v ) -> tuple:
    """simple docstring"""
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))
    graph = [[float("inf") for i in range(v)] for j in range(v)]
    for i in range(v):
        graph[i][i] = 0.0
    # src and dst are 0-based vertex indices and must be within range(v);
    # an out-of-range value will raise an IndexError when assigning the edge
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight
    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs
# 0 INF INF
# INF 0 2
# INF 1 0
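# A minimal non-interactive sketch (illustrative; not part of the original
# script) that exercises floyd_warshall without the input() prompts:
#
#   INF = float("inf")
#   demo_graph = [
#       [0.0, 2.0, INF],
#       [1.0, 0.0, INF],
#       [INF, INF, 0.0],
#   ]
#   dist, _ = floyd_warshall(demo_graph, 3)
#   # dist[0][1] == 2.0, dist[1][0] == 1.0; unreachable pairs stay INF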
| 27
| 0
|
"""simple docstring"""
from __future__ import annotations
import numpy as np
from numpy import float64
from numpy.typing import NDArray
def jacobi_iteration_method( coefficient_matrix: NDArray[float64] , constant_matrix: NDArray[float64] , init_val: list[float] , iterations: int , ) -> list[float]:
    rows1, cols1 = coefficient_matrix.shape
    rows2, cols2 = constant_matrix.shape
    if rows1 != cols1:
        msg = f"Coefficient matrix dimensions must be nxn but received {rows1}x{cols1}"
        raise ValueError(msg )
    if cols2 != 1:
        msg = f"Constant matrix must be nx1 but received {rows2}x{cols2}"
        raise ValueError(msg )
    if rows1 != rows2:
        msg = (
            "Coefficient and constant matrices dimensions must be nxn and nx1 but "
            f"received {rows1}x{cols1} and {rows2}x{cols2}"
        )
        raise ValueError(msg )
    if len(init_val ) != rows1:
        msg = (
            "Number of initial values must be equal to number of rows in coefficient "
            f"matrix but received {len(init_val )} and {rows1}"
        )
        raise ValueError(msg )
    if iterations <= 0:
        raise ValueError("Iterations must be at least 1" )
    table = np.concatenate(
        (coefficient_matrix, constant_matrix) , axis=1 )
    rows, cols = table.shape
    strictly_diagonally_dominant(table )
    # Iterate over the whole matrix for the given number of iterations
    for _ in range(iterations ):
        new_val = []
        for row in range(rows ):
            temp = 0
            for col in range(cols ):
                if col == row:
                    denom = table[row][col]
                elif col == cols - 1:
                    val = table[row][col]
                else:
                    temp += (-1) * table[row][col] * init_val[col]
            temp = (temp + val) / denom
            new_val.append(temp )
        init_val = new_val
    return [float(i ) for i in new_val]
def strictly_diagonally_dominant( table: NDArray[float64] ) -> bool:
    rows, cols = table.shape
    is_diagonally_dominant = True
    for i in range(0 , rows ):
        total = 0
        for j in range(0 , cols - 1 ):
            if i == j:
                continue
            else:
                total += table[i][j]
        if table[i][i] <= total:
            raise ValueError("Coefficient matrix is not strictly diagonally dominant" )
    return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
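# A minimal usage sketch (illustrative; not part of the original module):
#
#   coefficient = np.array([[4.0, 1.0, 1.0], [1.0, 5.0, 2.0], [1.0, 2.0, 4.0]])
#   constant = np.array([[2.0], [-6.0], [-4.0]])
#   print(jacobi_iteration_method(coefficient, constant, [0.5, -0.5, -0.5], iterations=50))
#
# Each diagonal entry of the coefficient matrix must exceed the sum of the
# other entries in its row (as checked above), otherwise
# strictly_diagonally_dominant() raises a ValueError before iterating.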
| 705
|
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
SCREAMING_SNAKE_CASE__:List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__:Optional[Any] = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class snake_case__ ( snake_case_ ):
_snake_case : str = """blenderbot-small"""
_snake_case : str = ["""past_key_values"""]
_snake_case : List[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self , lowerCamelCase=50265 , lowerCamelCase=512 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=8 , lowerCamelCase=2048 , lowerCamelCase=16 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase=True , lowerCamelCase="gelu" , lowerCamelCase=512 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.02 , lowerCamelCase=1 , lowerCamelCase=False , lowerCamelCase=0 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=2 , **lowerCamelCase , ):
__a = vocab_size
__a = max_position_embeddings
__a = d_model
__a = encoder_ffn_dim
__a = encoder_layers
__a = encoder_attention_heads
__a = decoder_ffn_dim
__a = decoder_layers
__a = decoder_attention_heads
__a = dropout
__a = attention_dropout
__a = activation_dropout
__a = activation_function
__a = init_std
__a = encoder_layerdrop
__a = decoder_layerdrop
__a = use_cache
__a = encoder_layers
__a = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
class snake_case__ ( snake_case_ ):
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a = {0: "batch"}
__a = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
__a = {0: "batch", 1: "decoder_sequence"}
__a = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
else:
__a = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def a__ ( self ):
if self.task in ["default", "seq2seq-lm"]:
__a = super().outputs
else:
__a = super(lowerCamelCase , self ).outputs
if self.use_past:
__a , __a = self.num_layers
for i in range(lowerCamelCase ):
__a = {0: "batch", 2: "past_sequence + sequence"}
__a = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Generate decoder inputs
__a = seq_length if not self.use_past else 1
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
__a = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__a = dict(**lowerCamelCase , **lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
__a = common_inputs["decoder_input_ids"].shape[1]
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = decoder_seq_length + 3
__a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase )] , dim=1 )
__a = []
            # If the numbers of encoder and decoder layers are present in the model configuration, both are considered
__a , __a = self.num_layers
__a = min(lowerCamelCase , lowerCamelCase )
__a = max(lowerCamelCase , lowerCamelCase ) - min_num_layers
__a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(lowerCamelCase ):
common_inputs["past_key_values"].append(
(
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
torch.zeros(lowerCamelCase ),
) )
# TODO: test this.
__a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(lowerCamelCase , lowerCamelCase ):
common_inputs["past_key_values"].append((torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
__a , __a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
__a = seqlen + 2
__a , __a = self.num_layers
__a , __a = self.num_attention_heads
__a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__a = common_inputs["attention_mask"].dtype
__a = torch.cat(
[common_inputs["attention_mask"], torch.ones(lowerCamelCase , lowerCamelCase , dtype=lowerCamelCase )] , dim=1 )
__a = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(lowerCamelCase )
]
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__a = tokenizer.num_special_tokens_to_add(lowerCamelCase )
__a = compute_effective_axis_dimension(
lowerCamelCase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=lowerCamelCase )
# Generate dummy inputs according to compute batch and sequence
__a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
__a = dict(tokenizer(lowerCamelCase , return_tensors=lowerCamelCase ) )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase = -1 , lowerCamelCase = -1 , lowerCamelCase = False , lowerCamelCase = None , ):
if self.task in ["default", "seq2seq-lm"]:
__a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
elif self.task == "causal-lm":
__a = self._generate_dummy_inputs_for_causal_lm(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
else:
__a = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
lowerCamelCase , batch_size=lowerCamelCase , seq_length=lowerCamelCase , is_pair=lowerCamelCase , framework=lowerCamelCase )
return common_inputs
def a__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
if self.task in ["default", "seq2seq-lm"]:
__a = super()._flatten_past_key_values_(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
else:
__a = super(lowerCamelCase , self )._flatten_past_key_values_(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase )
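# A minimal usage sketch (illustrative; the unmasked transformers names
# `BlenderbotSmallConfig` / `BlenderbotSmallOnnxConfig` are assumed for the
# two classes defined above):
#
#   from transformers import AutoTokenizer, BlenderbotSmallConfig
#   config = BlenderbotSmallConfig()
#   onnx_config = BlenderbotSmallOnnxConfig(config, task="seq2seq-lm")
#   tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
#   dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
#   # dummy_inputs holds input_ids, attention_mask, decoder_input_ids, decoder_attention_mask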
| 67
| 0
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
__magic_name__ : List[Any] =XCLIPTextConfig()
# derive patch size from model name
__magic_name__ : int =model_name.find("""patch""" )
__magic_name__ : str =int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] )
__magic_name__ : Dict =XCLIPVisionConfig(patch_size=lowerCamelCase , num_frames=lowerCamelCase )
if "large" in model_name:
__magic_name__ : int =768
__magic_name__ : Tuple =3072
__magic_name__ : str =12
__magic_name__ : Optional[Any] =1024
__magic_name__ : List[str] =4096
__magic_name__ : Union[str, Any] =16
__magic_name__ : Union[str, Any] =24
__magic_name__ : Tuple =768
__magic_name__ : Union[str, Any] =3072
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Dict =336
__magic_name__ : Any =XCLIPConfig.from_text_vision_configs(lowerCamelCase , lowerCamelCase )
if "large" in model_name:
__magic_name__ : int =768
return config
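# Illustrative walk-through of the patch-size parsing above: for
# model_name = "xclip-base-patch32", start_idx points at "patch" and
# int(model_name[start_idx + 5 : start_idx + 7]) == 32 (len("patch") == 5).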
def lowerCAmelCase_ ( lowerCamelCase ):
# text encoder
if name == "token_embedding.weight":
__magic_name__ : int =name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" )
if name == "positional_embedding":
__magic_name__ : Union[str, Any] =name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" )
if "ln_1" in name:
__magic_name__ : Union[str, Any] =name.replace("""ln_1""" , """layer_norm1""" )
if "ln_2" in name:
__magic_name__ : int =name.replace("""ln_2""" , """layer_norm2""" )
if "c_fc" in name:
__magic_name__ : Optional[Any] =name.replace("""c_fc""" , """fc1""" )
if "c_proj" in name:
__magic_name__ : Any =name.replace("""c_proj""" , """fc2""" )
if name.startswith("""transformer.resblocks""" ):
__magic_name__ : List[str] =name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ : Optional[Any] =name.replace("""attn.out_proj""" , """self_attn.out_proj""" )
if "ln_final" in name:
__magic_name__ : str =name.replace("""ln_final""" , """text_model.final_layer_norm""" )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ : Optional[int] =name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" )
if name == "visual.positional_embedding":
__magic_name__ : Dict =name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" )
if name.startswith("""visual.transformer.resblocks""" ):
__magic_name__ : Optional[int] =name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" )
if "visual.conv1" in name:
__magic_name__ : Any =name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" )
if "visual.ln_pre" in name:
__magic_name__ : Optional[Any] =name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" )
if "visual.ln_post" in name:
__magic_name__ : Optional[int] =name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" )
if "visual.proj" in name:
__magic_name__ : List[Any] =name.replace("""visual.proj""" , """visual_projection.weight""" )
if "text_projection" in name:
__magic_name__ : Optional[int] =name.replace("""text_projection""" , """text_projection.weight""" )
# things on top
if "prompts_visual_proj" in name:
__magic_name__ : Union[str, Any] =name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" )
if "prompts_visual_ln" in name:
__magic_name__ : List[Any] =name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" )
# mit
if name == "mit.positional_embedding":
__magic_name__ : Union[str, Any] =name.replace("""positional""" , """position""" )
if name.startswith("""mit.resblocks""" ):
__magic_name__ : Union[str, Any] =name.replace("""mit.resblocks""" , """mit.encoder.layers""" )
# prompts generator
if name.startswith("""prompts_generator.norm""" ):
__magic_name__ : int =name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" )
return name
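# Illustrative end-to-end rename (assuming each replacement above is
# assigned back to `name`, as in the unmasked original):
#   "visual.transformer.resblocks.0.ln_1.weight"
#       -> "vision_model.encoder.layers.0.layer_norm1.weight"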
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase ):
for key in orig_state_dict.copy().keys():
__magic_name__ : Dict =orig_state_dict.pop(lowerCamelCase )
if "attn.in_proj" in key:
__magic_name__ : Any =key.split(""".""" )
if key.startswith("""visual""" ):
__magic_name__ : Tuple =key_split[3]
__magic_name__ : Dict =config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ : Optional[Any] =val[
:dim, :
]
__magic_name__ : Optional[Any] =val[
dim : dim * 2, :
]
__magic_name__ : Any =val[
-dim:, :
]
else:
__magic_name__ : Union[str, Any] =val[
:dim
]
__magic_name__ : Union[str, Any] =val[
dim : dim * 2
]
__magic_name__ : int =val[
-dim:
]
else:
if "weight" in key:
__magic_name__ : str =val[
:dim, :
]
__magic_name__ : Any =val[
dim : dim * 2, :
]
__magic_name__ : Any =val[
-dim:, :
]
else:
__magic_name__ : Any =val[:dim]
__magic_name__ : List[str] =val[
dim : dim * 2
]
__magic_name__ : Any =val[-dim:]
elif key.startswith("""mit""" ):
__magic_name__ : Dict =key_split[2]
__magic_name__ : str =config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ : Optional[int] =val[:dim, :]
__magic_name__ : Union[str, Any] =val[dim : dim * 2, :]
__magic_name__ : int =val[-dim:, :]
else:
__magic_name__ : List[Any] =val[:dim]
__magic_name__ : Tuple =val[dim : dim * 2]
__magic_name__ : Any =val[-dim:]
else:
__magic_name__ : Union[str, Any] =key_split[2]
__magic_name__ : int =config.text_config.hidden_size
if "weight" in key:
__magic_name__ : List[str] =val[:dim, :]
__magic_name__ : List[str] =val[
dim : dim * 2, :
]
__magic_name__ : List[Any] =val[-dim:, :]
else:
__magic_name__ : Optional[Any] =val[:dim]
__magic_name__ : str =val[
dim : dim * 2
]
__magic_name__ : Optional[int] =val[-dim:]
else:
__magic_name__ : Optional[int] =rename_key(lowerCamelCase )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ : Any =val.T
__magic_name__ : Optional[Any] =val
return orig_state_dict
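# Illustrative note: a fused "attn.in_proj_weight" of shape (3 * dim, dim)
# is split into equal thirds for the q/k/v projections, e.g. with dim = 768:
#   q_proj.weight = val[:768, :]
#   k_proj.weight = val[768:1536, :]
#   v_proj.weight = val[-768:, :]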
def lowerCAmelCase_ ( lowerCamelCase ):
if num_frames == 8:
__magic_name__ : Union[str, Any] ="""eating_spaghetti_8_frames.npy"""
elif num_frames == 16:
__magic_name__ : Union[str, Any] ="""eating_spaghetti.npy"""
elif num_frames == 32:
__magic_name__ : Union[str, Any] ="""eating_spaghetti_32_frames.npy"""
__magic_name__ : str =hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename=lowerCamelCase , repo_type="""dataset""" , )
__magic_name__ : Union[str, Any] =np.load(lowerCamelCase )
return list(lowerCamelCase )
def lowerCAmelCase_ ( lowerCamelCase , lowerCamelCase=None , lowerCamelCase=False ):
__magic_name__ : Tuple ={
# fully supervised kinetics-400 checkpoints
"""xclip-base-patch32""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth""",
"""xclip-base-patch32-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"""
),
"""xclip-base-patch16""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth""",
"""xclip-base-patch16-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"""
),
"""xclip-large-patch14""": """https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb""",
"""xclip-large-patch14-16-frames""": """https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f""",
# fully supervised kinetics-600 checkpoints
"""xclip-base-patch16-kinetics-600""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"""
),
"""xclip-base-patch16-kinetics-600-16-frames""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"""
),
"""xclip-large-patch14-kinetics-600""": """https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be""",
# few shot
"""xclip-base-patch16-hmdb-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"""
),
"""xclip-base-patch16-hmdb-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"""
),
"""xclip-base-patch16-hmdb-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"""
),
"""xclip-base-patch16-hmdb-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"""
),
"""xclip-base-patch16-ucf-2-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"""
),
"""xclip-base-patch16-ucf-4-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"""
),
"""xclip-base-patch16-ucf-8-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"""
),
"""xclip-base-patch16-ucf-16-shot""": (
"""https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"""
),
# zero shot
"""xclip-base-patch16-zero-shot""": """https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth""",
}
__magic_name__ : List[str] =model_to_url[model_name]
__magic_name__ : List[Any] =8
if "16-frames" in model_name:
__magic_name__ : Any =16
elif "shot" in model_name:
__magic_name__ : Optional[int] =32
__magic_name__ : Union[str, Any] =get_xclip_config(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =XCLIPModel(lowerCamelCase )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ : Optional[int] ="""pytorch_model.bin"""
gdown.cached_download(lowerCamelCase , lowerCamelCase , quiet=lowerCamelCase )
__magic_name__ : Optional[int] =torch.load(lowerCamelCase , map_location="""cpu""" )["""model"""]
else:
__magic_name__ : Optional[Any] =torch.hub.load_state_dict_from_url(lowerCamelCase )["""model"""]
__magic_name__ : Optional[Any] =convert_state_dict(lowerCamelCase , lowerCamelCase )
__magic_name__ : Any =XCLIPModel(lowerCamelCase )
__magic_name__ , __magic_name__ : Dict =model.load_state_dict(lowerCamelCase , strict=lowerCamelCase )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ : str =336 if model_name == """xclip-large-patch14-16-frames""" else 224
__magic_name__ : Union[str, Any] =VideoMAEImageProcessor(size=lowerCamelCase )
__magic_name__ : Any =CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" )
__magic_name__ : List[str] =CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" )
__magic_name__ : Tuple =XCLIPProcessor(image_processor=lowerCamelCase , tokenizer=lowerCamelCase )
__magic_name__ : str =prepare_video(lowerCamelCase )
__magic_name__ : List[Any] =processor(
text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=lowerCamelCase , return_tensors="""pt""" , padding=lowerCamelCase )
print("""Shape of pixel values:""" , inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ : Any =model(**lowerCamelCase )
# Verify outputs
__magic_name__ : Dict =outputs.logits_per_video
__magic_name__ : Optional[Any] =logits_per_video.softmax(dim=1 )
print("""Probs:""" , lowerCamelCase )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ : Union[str, Any] =torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ : Tuple =torch.tensor([[7.0_999E-04, 9.9_883E-01, 4.5_580E-04]] )
elif model_name == "xclip-base-patch16":
__magic_name__ : str =torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ : List[Any] =torch.tensor([[7.6_937E-04, 9.9_728E-01, 1.9_473E-03]] )
elif model_name == "xclip-large-patch14":
__magic_name__ : Optional[int] =torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ : Union[str, Any] =torch.tensor([[3.3_877E-04, 9.9_937E-01, 2.8_888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ : Any =torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ : str =torch.tensor([[3.8_554E-04, 9.9_929E-01, 3.2_754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ : Dict =torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ : str =torch.tensor([[7.1_890E-06, 9.9_994E-01, 5.6_559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ : Tuple =torch.tensor([[1.0_320E-05, 9.9_993E-01, 6.2_435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ : List[str] =torch.tensor([[4.1_377E-06, 9.9_990E-01, 9.8_386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ : List[str] =torch.tensor([[4.1_347E-05, 9.9_962E-01, 3.3_411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ : Optional[Any] =torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ : int =torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ : Optional[Any] =torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ : Any =torch.tensor([[9.8_219E-04, 9.9_593E-01, 3.0_863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ : int =torch.tensor([[3.5_082E-04, 9.9_785E-01, 1.7_966E-03]] )
else:
raise ValueError(F"Model name {model_name} not supported" )
assert torch.allclose(lowerCamelCase , lowerCamelCase , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCamelCase )
if push_to_hub:
print("""Pushing model, processor and slow tokenizer files to the hub...""" )
model.push_to_hub(lowerCamelCase , organization="""nielsr""" )
processor.push_to_hub(lowerCamelCase , organization="""nielsr""" )
slow_tokenizer.push_to_hub(lowerCamelCase , organization="""nielsr""" )
if __name__ == "__main__":
UpperCAmelCase_ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="xclip-base-patch32",
type=str,
help="Name of the model.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
UpperCAmelCase_ : Union[str, Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
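# Example invocation (illustrative; the script filename is an assumption):
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32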
| 21
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
snake_case__ = logging.get_logger(__name__)
class lowerCAmelCase_ ( _a):
def __init__( self : Union[str, Any] , __A : int , __A : int , __A : float , **__A : Tuple ) ->Union[str, Any]:
"""simple docstring"""
a__ :Any = feature_size
a__ :int = sampling_rate
a__ :List[str] = padding_value
a__ :str = kwargs.pop("padding_side" , "right" )
a__ :Any = kwargs.pop("return_attention_mask" , __A )
super().__init__(**__A )
def _snake_case ( self : str , __A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , __A : Union[bool, str, PaddingStrategy] = True , __A : Optional[int] = None , __A : bool = False , __A : Optional[int] = None , __A : Optional[bool] = None , __A : Optional[Union[str, TensorType]] = None , ) ->BatchFeature:
"""simple docstring"""
if isinstance(__A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
a__ :Dict = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F''' to this method that includes {self.model_input_names[0]}, but you provided'''
F''' {list(processed_features.keys() )}''' )
a__ :int = processed_features[self.model_input_names[0]]
a__ :str = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(__A ) == 0:
if return_attention_mask:
a__ :Optional[int] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
a__ :int = required_input[0]
if isinstance(__A , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non-empty element.
a__ :Any = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(__A ):
a__ :Tuple = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(__A ):
a__ :Optional[Any] = "tf"
elif is_torch_tensor(__A ):
a__ :Optional[Any] = "pt"
elif isinstance(__A , (int, float, list, tuple, np.ndarray) ):
a__ :Union[str, Any] = "np"
else:
raise ValueError(
F'''type of {first_element} unknown: {type(__A )}. '''
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
a__ :Optional[Any] = to_numpy(__A )
else:
a__ :Union[str, Any] = [to_numpy(__A ) for v in value]
# Convert padding_strategy in PaddingStrategy
a__ :Optional[int] = self._get_padding_strategies(padding=__A , max_length=__A )
a__ :int = processed_features[self.model_input_names[0]]
a__ :Union[str, Any] = len(__A )
if not all(len(__A ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
a__ :int = []
for i in range(__A ):
a__ :Optional[int] = {k: v[i] for k, v in processed_features.items()}
# truncation
a__ :Tuple = self._truncate(
__A , max_length=__A , pad_to_multiple_of=__A , truncation=__A , )
truncated_inputs.append(__A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
a__ :str = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
a__ :str = PaddingStrategy.MAX_LENGTH
a__ :str = {}
for i in range(__A ):
# padding
a__ :Optional[int] = self._pad(
truncated_inputs[i] , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
for key, value in outputs.items():
if key not in batch_outputs:
a__ :Optional[Any] = []
                if value.dtype is np.dtype(np.float64 ):
                    a__ :List[Any] = value.astype(np.float32 )
batch_outputs[key].append(__A )
return BatchFeature(__A , tensor_type=__A )
def _snake_case ( self : List[Any] , __A : Union[Dict[str, np.ndarray], BatchFeature] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ) ->dict:
"""simple docstring"""
a__ :Union[str, Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
a__ :List[Any] = len(__A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a__ :List[str] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a__ :int = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            a__ :Dict = np.ones(len(__A ) , dtype=np.int32 )
if needs_to_be_padded:
a__ :List[str] = max_length - len(__A )
if self.padding_side == "right":
if return_attention_mask:
a__ :List[Any] = np.pad(
processed_features["attention_mask"] , (0, difference) )
a__ :List[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
a__ :Any = np.pad(
__A , __A , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
a__ :Dict = np.pad(
processed_features["attention_mask"] , (difference, 0) )
a__ :List[str] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
a__ :List[str] = np.pad(
__A , __A , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def _snake_case ( self : Tuple , __A : Union[Dict[str, np.ndarray], BatchFeature] , __A : Optional[int] = None , __A : Optional[int] = None , __A : Optional[bool] = None , ) ->Optional[Any]:
"""simple docstring"""
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
a__ :str = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
a__ :Tuple = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
a__ :List[Any] = len(__A ) > max_length
if needs_to_be_truncated:
a__ :Optional[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
a__ :List[Any] = processed_features["attention_mask"][:max_length]
return processed_features
def _snake_case ( self : Union[str, Any] , __A : int=False , __A : Dict=None ) ->Optional[int]:
"""simple docstring"""
if padding is not False:
if padding is True:
a__ :Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(__A , __A ):
a__ :Union[str, Any] = PaddingStrategy(__A )
elif isinstance(__A , __A ):
a__ :Union[str, Any] = padding
else:
a__ :Optional[Any] = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
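# A minimal numpy sketch (not part of the original class; the unmasked method
# name is assumed to be `_pad`) of the right-padding pattern implemented above
# for a 1-D feature sequence:
#
#   import numpy as np
#   seq = np.array([0.1, 0.2, 0.3], dtype=np.float32)
#   max_length, padding_value = 5, 0.0
#   difference = max_length - len(seq)
#   attention_mask = np.pad(np.ones(len(seq), dtype=np.int32), (0, difference))
#   padded = np.pad(seq, (0, difference), "constant", constant_values=padding_value)
#   # padded -> [0.1, 0.2, 0.3, 0.0, 0.0]; attention_mask -> [1, 1, 1, 0, 0]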
| 395
| 0
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowercase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowercase : Tuple = 256
class __lowercase ( _A ):
lowerCamelCase : Union[str, Any] = ["melgan"]
def __init__(self , A , A , A , A , A , ) -> None:
super().__init__()
# From MELGAN
lowerCamelCase_ : int = math.log(1E-5 ) # Matches MelGAN training.
lowerCamelCase_ : List[Any] = 4.0 # Largest value for most examples
lowerCamelCase_ : List[str] = 1_2_8
self.register_modules(
notes_encoder=A , continuous_encoder=A , decoder=A , scheduler=A , melgan=A , )
def UpperCAmelCase__ (self , A , A=(-1.0, 1.0) , A=False ) -> Any:
lowerCamelCase_ : Optional[int] = output_range
if clip:
lowerCamelCase_ : List[str] = torch.clip(A , self.min_value , self.max_value )
# Scale to [0, 1].
lowerCamelCase_ : Tuple = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def UpperCAmelCase__ (self , A , A=(-1.0, 1.0) , A=False ) -> Any:
lowerCamelCase_ : Union[str, Any] = input_range
lowerCamelCase_ : Any = torch.clip(A , A , A ) if clip else outputs
# Scale to [0, 1].
lowerCamelCase_ : Optional[Any] = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def UpperCAmelCase__ (self , A , A , A ) -> Optional[int]:
lowerCamelCase_ : Optional[int] = input_tokens > 0
lowerCamelCase_ : List[Any] = self.notes_encoder(
encoder_input_tokens=A , encoder_inputs_mask=A )
lowerCamelCase_ : List[Any] = self.continuous_encoder(
encoder_inputs=A , encoder_inputs_mask=A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def UpperCAmelCase__ (self , A , A , A ) -> Union[str, Any]:
lowerCamelCase_ : Tuple = noise_time
if not torch.is_tensor(A ):
lowerCamelCase_ : Tuple = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(A ) and len(timesteps.shape ) == 0:
lowerCamelCase_ : str = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowerCamelCase_ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowerCamelCase_ : str = self.decoder(
encodings_and_masks=A , decoder_input_tokens=A , decoder_noise_time=A )
return logits
@torch.no_grad()
def __call__(self , A , A = None , A = 1_0_0 , A = True , A = "numpy" , A = None , A = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
F"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
F""" {type(A )}.""" )
        lowerCamelCase_ : List[Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.float32 )
        lowerCamelCase_ : Any = np.zeros([1, 0, self.n_dims] , np.float32 )
lowerCamelCase_ : List[str] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
for i, encoder_input_tokens in enumerate(A ):
if i == 0:
lowerCamelCase_ : int = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowerCamelCase_ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowerCamelCase_ : List[Any] = ones
lowerCamelCase_ : List[Any] = self.scale_features(
A , output_range=[-1.0, 1.0] , clip=A )
lowerCamelCase_ : List[Any] = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=A , continuous_mask=A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowerCamelCase_ : List[str] = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowerCamelCase_ : List[str] = self.decode(
encodings_and_masks=A , input_tokens=A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowerCamelCase_ : str = self.scheduler.step(A , A , A , generator=A ).prev_sample
lowerCamelCase_ : Any = self.scale_to_features(A , input_range=[-1.0, 1.0] )
lowerCamelCase_ : Union[str, Any] = mel[:1]
lowerCamelCase_ : Tuple = mel.cpu().float().numpy()
lowerCamelCase_ : List[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A )
logger.info('''Generated segment''' , A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
            lowerCamelCase_ : str = self.melgan(input_features=full_pred_mel.astype(np.float32 ) )
else:
lowerCamelCase_ : Any = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=A )
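# A minimal sketch (illustrative; not part of the pipeline) of the min/max
# rescaling performed by the two scaling helpers above:
#
#   import math, torch
#   min_value, max_value = math.log(1e-5), 4.0  # same constants as __init__
#   features = torch.tensor([min_value, 0.0, max_value])
#   zero_one = (features - min_value) / (max_value - min_value)
#   scaled = zero_one * (1.0 - (-1.0)) + (-1.0)  # endpoints map to -1.0 and 1.0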
| 713
|
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __lowercase :
def __init__(self , A , ):
lowerCamelCase_ : int = parent
lowerCamelCase_ : Dict = 1_3
lowerCamelCase_ : Any = 7
lowerCamelCase_ : Dict = 3_0
lowerCamelCase_ : Optional[Any] = self.seq_length + self.mem_len
lowerCamelCase_ : Tuple = 1_5
lowerCamelCase_ : Tuple = True
lowerCamelCase_ : List[Any] = True
lowerCamelCase_ : Dict = 9_9
lowerCamelCase_ : Any = [1_0, 5_0, 8_0]
lowerCamelCase_ : List[str] = 3_2
lowerCamelCase_ : Tuple = 3_2
lowerCamelCase_ : Optional[Any] = 4
lowerCamelCase_ : Union[str, Any] = 8
lowerCamelCase_ : Dict = 1_2_8
lowerCamelCase_ : Dict = 2
lowerCamelCase_ : Tuple = 2
lowerCamelCase_ : Any = None
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : Union[str, Any] = 0
lowerCamelCase_ : str = 3
lowerCamelCase_ : Dict = self.vocab_size - 1
lowerCamelCase_ : Dict = 0.01
def UpperCAmelCase__ (self ):
lowerCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : Union[str, Any] = None
if self.use_labels:
lowerCamelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ : str = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def UpperCAmelCase__ (self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def UpperCAmelCase__ (self , A , A , A , A ):
lowerCamelCase_ : int = TFTransfoXLModel(A )
lowerCamelCase_, lowerCamelCase_ : Tuple = model(A ).to_tuple()
lowerCamelCase_ : Union[str, Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a}
lowerCamelCase_, lowerCamelCase_ : Any = model(A ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ (self , A , A , A , A ):
lowerCamelCase_ : List[Any] = TFTransfoXLLMHeadModel(A )
lowerCamelCase_, lowerCamelCase_ : List[Any] = model(A ).to_tuple()
lowerCamelCase_ : Optional[int] = {'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = model(A ).to_tuple()
lowerCamelCase_, lowerCamelCase_ : Tuple = model([input_ids_a, mems_a] ).to_tuple()
lowerCamelCase_ : List[str] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowerCamelCase_, lowerCamelCase_ : str = model(A ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def UpperCAmelCase__ (self , A , A , A , A ):
lowerCamelCase_ : Dict = TFTransfoXLForSequenceClassification(A )
lowerCamelCase_ : Optional[int] = model(A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Tuple = self.prepare_config_and_inputs()
((lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_), (lowerCamelCase_)) : List[Any] = config_and_inputs
lowerCamelCase_ : Tuple = {'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __lowercase ( _lowercase , _lowercase , unittest.TestCase ):
lowerCamelCase : Any = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowerCamelCase : Tuple = () if is_tf_available() else ()
lowerCamelCase : Any = (
{
"feature-extraction": TFTransfoXLModel,
"text-classification": TFTransfoXLForSequenceClassification,
"text-generation": TFTransfoXLLMHeadModel,
"zero-shot": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowerCamelCase : str = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : Dict = False
lowerCamelCase : Optional[int] = False
def UpperCAmelCase__ (self , A , A , A , A , A ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Optional[Any] = TFTransfoXLModelTester(self )
lowerCamelCase_ : Optional[Any] = ConfigTester(self , config_class=A , d_embed=3_7 )
def UpperCAmelCase__ (self ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ (self ):
self.model_tester.set_seed()
lowerCamelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*A )
def UpperCAmelCase__ (self ):
self.model_tester.set_seed()
lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*A )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase_ : List[Any] = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
lowerCamelCase_ : Optional[Any] = model_class(A )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
lowerCamelCase_ : str = model.get_output_embeddings()
assert isinstance(A , tf.keras.layers.Layer )
lowerCamelCase_ : int = model.get_bias()
assert name is None
else:
lowerCamelCase_ : Tuple = model.get_output_embeddings()
assert x is None
lowerCamelCase_ : int = model.get_bias()
assert name is None
def UpperCAmelCase__ (self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def UpperCAmelCase__ (self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ : Optional[Any] = TFTransfoXLModel.from_pretrained(A )
self.assertIsNotNone(A )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def UpperCAmelCase__ (self ):
pass
@require_tf
class __lowercase ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def UpperCAmelCase__ (self ):
lowerCamelCase_ : Union[str, Any] = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowerCamelCase_ : Optional[Any] = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowerCamelCase_ : Union[str, Any] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowerCamelCase_ : Optional[Any] = model.generate(A , max_length=2_0_0 , do_sample=A )
self.assertListEqual(output_ids[0].numpy().tolist() , A )
| 357
| 0
|
# flake8: noqa
# Lint as: python3
__all__ = [
"""VerificationMode""",
"""Version""",
"""disable_progress_bar""",
"""enable_progress_bar""",
"""is_progress_bar_enabled""",
"""experimental""",
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 12
|
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class a ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
def UpperCAmelCase_ ( self : Tuple ) -> int:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = 5
# Realm tok
__lowercase = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
__lowercase = os.path.join(self.tmpdirname , '''realm_tokenizer''' )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
__lowercase = os.path.join(lowerCamelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
__lowercase = os.path.join(self.tmpdirname , '''realm_block_records''' )
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__ )
def UpperCAmelCase_ ( self : List[str] ) -> RealmTokenizer:
"""simple docstring"""
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
def UpperCAmelCase_ ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
__lowercase = RealmConfig(num_block_records=self.num_block_records )
return config
def UpperCAmelCase_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
__lowercase = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
def UpperCAmelCase_ ( self : int ) -> List[Any]:
"""simple docstring"""
__lowercase = np.array(
[
b'''This is the first record''',
b'''This is the second record''',
b'''This is the third record''',
b'''This is the fourth record''',
b'''This is the fifth record''',
b'''This is a longer longer longer record''',
] , dtype=lowerCamelCase__ , )
return block_records
def UpperCAmelCase_ ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
__lowercase = self.get_config()
__lowercase = self.get_dummy_retriever()
__lowercase = retriever.tokenizer
__lowercase = np.array([0, 3] , dtype='''long''' )
__lowercase = tokenizer(['''Test question'''] ).input_ids
__lowercase = tokenizer(
['''the fourth'''] , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ).input_ids
__lowercase = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase = retriever(
lowerCamelCase__ , lowerCamelCase__ , answer_ids=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors='''np''' )
self.assertEqual(len(lowerCamelCase__ ) , 2 )
self.assertEqual(len(lowerCamelCase__ ) , 2 )
self.assertEqual(len(lowerCamelCase__ ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
def UpperCAmelCase_ ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_config()
__lowercase = self.get_dummy_retriever()
__lowercase = retriever.tokenizer
__lowercase = np.array([0, 3, 5] , dtype='''long''' )
__lowercase = tokenizer(['''Test question'''] ).input_ids
__lowercase = tokenizer(
['''the fourth''', '''longer longer'''] , add_special_tokens=lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , ).input_ids
__lowercase = config.reader_seq_len
__lowercase , __lowercase , __lowercase , __lowercase = retriever(
lowerCamelCase__ , lowerCamelCase__ , answer_ids=lowerCamelCase__ , max_length=lowerCamelCase__ , return_tensors='''np''' )
self.assertEqual([False, True, True] , lowerCamelCase__ )
self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , lowerCamelCase__ )
self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , lowerCamelCase__ )
def UpperCAmelCase_ ( self : int ) -> str:
"""simple docstring"""
__lowercase = self.get_dummy_retriever()
retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
# Test local path
__lowercase = retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
# Test mocked remote path
with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
__lowercase = os.path.join(
os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
__lowercase = RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
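        # Note (sketch): patching hf_hub_download makes the remote-looking
        # from_pretrained("google/realm-cc-news-pretrained-openqa") resolve to
        # the block-records file saved locally above, so the test runs without
        # any network access.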
| 332
| 0
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _snake_case ( __snake_case , __snake_case ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = XCLIPTextConfig()
# derive patch size from model name
UpperCAmelCase_ : Optional[int] = model_name.find("patch" )
UpperCAmelCase_ : Optional[Any] = int(model_name[start_idx + len("patch" ) : start_idx + len("patch" ) + 2] )
UpperCAmelCase_ : List[Any] = XCLIPVisionConfig(patch_size=_snake_case , num_frames=_snake_case )
if "large" in model_name:
UpperCAmelCase_ : Union[str, Any] = 7_6_8
UpperCAmelCase_ : Tuple = 3_0_7_2
UpperCAmelCase_ : Optional[int] = 1_2
UpperCAmelCase_ : Dict = 1_0_2_4
UpperCAmelCase_ : Dict = 4_0_9_6
UpperCAmelCase_ : Any = 1_6
UpperCAmelCase_ : List[Any] = 2_4
UpperCAmelCase_ : str = 7_6_8
UpperCAmelCase_ : Union[str, Any] = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Dict = 3_3_6
UpperCAmelCase_ : List[Any] = XCLIPConfig.from_text_vision_configs(_snake_case , _snake_case )
if "large" in model_name:
UpperCAmelCase_ : Optional[Any] = 7_6_8
return config
def _snake_case ( __snake_case ) -> Union[str, Any]:
'''simple docstring'''
if name == "token_embedding.weight":
UpperCAmelCase_ : Union[str, Any] = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight" )
if name == "positional_embedding":
UpperCAmelCase_ : Union[str, Any] = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight" )
if "ln_1" in name:
UpperCAmelCase_ : int = name.replace("ln_1" , "layer_norm1" )
if "ln_2" in name:
UpperCAmelCase_ : Dict = name.replace("ln_2" , "layer_norm2" )
if "c_fc" in name:
UpperCAmelCase_ : List[Any] = name.replace("c_fc" , "fc1" )
if "c_proj" in name:
UpperCAmelCase_ : List[Any] = name.replace("c_proj" , "fc2" )
if name.startswith("transformer.resblocks" ):
UpperCAmelCase_ : Dict = name.replace("transformer.resblocks" , "text_model.encoder.layers" )
if "attn.out_proj" in name and "message" not in name:
UpperCAmelCase_ : List[Any] = name.replace("attn.out_proj" , "self_attn.out_proj" )
if "ln_final" in name:
UpperCAmelCase_ : int = name.replace("ln_final" , "text_model.final_layer_norm" )
# visual encoder
if name == "visual.class_embedding":
UpperCAmelCase_ : List[Any] = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding" )
if name == "visual.positional_embedding":
UpperCAmelCase_ : str = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight" )
if name.startswith("visual.transformer.resblocks" ):
UpperCAmelCase_ : Dict = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers" )
if "visual.conv1" in name:
UpperCAmelCase_ : Union[str, Any] = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding" )
if "visual.ln_pre" in name:
UpperCAmelCase_ : Any = name.replace("visual.ln_pre" , "vision_model.pre_layernorm" )
if "visual.ln_post" in name:
UpperCAmelCase_ : str = name.replace("visual.ln_post" , "vision_model.post_layernorm" )
if "visual.proj" in name:
UpperCAmelCase_ : List[str] = name.replace("visual.proj" , "visual_projection.weight" )
if "text_projection" in name:
UpperCAmelCase_ : int = name.replace("text_projection" , "text_projection.weight" )
# things on top
if "prompts_visual_proj" in name:
UpperCAmelCase_ : Optional[Any] = name.replace("prompts_visual_proj" , "prompts_visual_projection" )
if "prompts_visual_ln" in name:
UpperCAmelCase_ : Optional[int] = name.replace("prompts_visual_ln" , "prompts_visual_layernorm" )
# mit
if name == "mit.positional_embedding":
UpperCAmelCase_ : Optional[int] = name.replace("positional" , "position" )
if name.startswith("mit.resblocks" ):
UpperCAmelCase_ : str = name.replace("mit.resblocks" , "mit.encoder.layers" )
# prompts generator
if name.startswith("prompts_generator.norm" ):
UpperCAmelCase_ : List[str] = name.replace("prompts_generator.norm" , "prompts_generator.layernorm" )
return name
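# Example mapping (a sketch, assuming the replacements chain as in the
# upstream conversion script):
#     "visual.transformer.resblocks.0.ln_1.weight"
#     -> "vision_model.encoder.layers.0.layer_norm1.weight"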
def _snake_case ( __snake_case , __snake_case ) -> Tuple:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ : Any = orig_state_dict.pop(_snake_case )
if "attn.in_proj" in key:
UpperCAmelCase_ : List[Any] = key.split("." )
if key.startswith("visual" ):
UpperCAmelCase_ : Optional[Any] = key_split[3]
UpperCAmelCase_ : List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCAmelCase_ : Any = val[
:dim, :
]
UpperCAmelCase_ : Tuple = val[
dim : dim * 2, :
]
UpperCAmelCase_ : List[str] = val[
-dim:, :
]
else:
UpperCAmelCase_ : Dict = val[
:dim
]
UpperCAmelCase_ : Union[str, Any] = val[
dim : dim * 2
]
UpperCAmelCase_ : Optional[Any] = val[
-dim:
]
else:
if "weight" in key:
UpperCAmelCase_ : str = val[
:dim, :
]
UpperCAmelCase_ : Optional[int] = val[
dim : dim * 2, :
]
UpperCAmelCase_ : Optional[Any] = val[
-dim:, :
]
else:
UpperCAmelCase_ : Tuple = val[:dim]
UpperCAmelCase_ : Any = val[
dim : dim * 2
]
UpperCAmelCase_ : Dict = val[-dim:]
elif key.startswith("mit" ):
UpperCAmelCase_ : str = key_split[2]
UpperCAmelCase_ : List[Any] = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCAmelCase_ : List[str] = val[:dim, :]
UpperCAmelCase_ : List[str] = val[dim : dim * 2, :]
UpperCAmelCase_ : Dict = val[-dim:, :]
else:
UpperCAmelCase_ : int = val[:dim]
UpperCAmelCase_ : Any = val[dim : dim * 2]
UpperCAmelCase_ : Any = val[-dim:]
else:
UpperCAmelCase_ : Optional[int] = key_split[2]
UpperCAmelCase_ : Dict = config.text_config.hidden_size
if "weight" in key:
UpperCAmelCase_ : List[str] = val[:dim, :]
UpperCAmelCase_ : Dict = val[
dim : dim * 2, :
]
UpperCAmelCase_ : int = val[-dim:, :]
else:
UpperCAmelCase_ : List[str] = val[:dim]
UpperCAmelCase_ : List[str] = val[
dim : dim * 2
]
UpperCAmelCase_ : int = val[-dim:]
else:
UpperCAmelCase_ : str = rename_key(_snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
UpperCAmelCase_ : Optional[Any] = val.T
UpperCAmelCase_ : List[Any] = val
return orig_state_dict
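# A minimal sketch (hypothetical helper, not part of the original script) of
# the fused-attention split performed above: checkpoints store the query, key
# and value projections as one in_proj tensor of shape (3 * dim, dim), which
# is sliced into equal thirds.
def _split_in_proj_sketch(weight, dim):
    """Split a fused (3*dim, dim) in_proj weight into (q, k, v) weights."""
    q = weight[:dim, :]  # first third -> query projection
    k = weight[dim : dim * 2, :]  # middle third -> key projection
    v = weight[-dim:, :]  # last third -> value projection
    return q, k, v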
def _snake_case ( __snake_case ) -> List[Any]:
'''simple docstring'''
if num_frames == 8:
UpperCAmelCase_ : Tuple = "eating_spaghetti_8_frames.npy"
elif num_frames == 1_6:
UpperCAmelCase_ : Any = "eating_spaghetti.npy"
elif num_frames == 3_2:
UpperCAmelCase_ : Optional[int] = "eating_spaghetti_32_frames.npy"
UpperCAmelCase_ : Union[str, Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=_snake_case , repo_type="dataset" , )
UpperCAmelCase_ : List[Any] = np.load(_snake_case )
return list(_snake_case )
def _snake_case ( __snake_case , __snake_case=None , __snake_case=False ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ : Optional[Any] = {
# fully supervised kinetics-400 checkpoints
"xclip-base-patch32": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth",
"xclip-base-patch32-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth"
),
"xclip-base-patch16": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth",
"xclip-base-patch16-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth"
),
"xclip-large-patch14": "https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb",
"xclip-large-patch14-16-frames": "https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f",
# fully supervised kinetics-600 checkpoints
"xclip-base-patch16-kinetics-600": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth"
),
"xclip-base-patch16-kinetics-600-16-frames": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth"
),
"xclip-large-patch14-kinetics-600": "https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be",
# few shot
"xclip-base-patch16-hmdb-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth"
),
"xclip-base-patch16-hmdb-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth"
),
"xclip-base-patch16-hmdb-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth"
),
"xclip-base-patch16-hmdb-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth"
),
"xclip-base-patch16-ucf-2-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth"
),
"xclip-base-patch16-ucf-4-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth"
),
"xclip-base-patch16-ucf-8-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth"
),
"xclip-base-patch16-ucf-16-shot": (
"https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth"
),
# zero shot
"xclip-base-patch16-zero-shot": "https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth",
}
UpperCAmelCase_ : List[str] = model_to_url[model_name]
UpperCAmelCase_ : Union[str, Any] = 8
if "16-frames" in model_name:
UpperCAmelCase_ : Union[str, Any] = 1_6
elif "shot" in model_name:
UpperCAmelCase_ : Optional[int] = 3_2
UpperCAmelCase_ : List[str] = get_xclip_config(_snake_case , _snake_case )
UpperCAmelCase_ : Dict = XCLIPModel(_snake_case )
model.eval()
if "drive" in checkpoint_url:
UpperCAmelCase_ : Optional[int] = "pytorch_model.bin"
gdown.cached_download(_snake_case , _snake_case , quiet=_snake_case )
UpperCAmelCase_ : Tuple = torch.load(_snake_case , map_location="cpu" )["model"]
else:
UpperCAmelCase_ : Union[str, Any] = torch.hub.load_state_dict_from_url(_snake_case )["model"]
UpperCAmelCase_ : Optional[int] = convert_state_dict(_snake_case , _snake_case )
UpperCAmelCase_ : Optional[int] = XCLIPModel(_snake_case )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = model.load_state_dict(_snake_case , strict=_snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
UpperCAmelCase_ : Tuple = 3_3_6 if model_name == "xclip-large-patch14-16-frames" else 2_2_4
UpperCAmelCase_ : Any = VideoMAEImageProcessor(size=_snake_case )
UpperCAmelCase_ : Any = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase_ : Any = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32" )
UpperCAmelCase_ : Optional[Any] = XCLIPProcessor(image_processor=_snake_case , tokenizer=_snake_case )
UpperCAmelCase_ : Tuple = prepare_video(_snake_case )
UpperCAmelCase_ : Any = processor(
text=["playing sports", "eating spaghetti", "go shopping"] , videos=_snake_case , return_tensors="pt" , padding=_snake_case )
print("Shape of pixel values:" , inputs.pixel_values.shape )
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**_snake_case )
# Verify outputs
UpperCAmelCase_ : Optional[Any] = outputs.logits_per_video
UpperCAmelCase_ : List[str] = logits_per_video.softmax(dim=1 )
print("Probs:" , _snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCAmelCase_ : str = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCAmelCase_ : Tuple = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] )
elif model_name == "xclip-base-patch16":
UpperCAmelCase_ : int = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCAmelCase_ : str = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] )
elif model_name == "xclip-large-patch14":
UpperCAmelCase_ : List[str] = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCAmelCase_ : Tuple = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCAmelCase_ : Any = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCAmelCase_ : Dict = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCAmelCase_ : List[Any] = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCAmelCase_ : Dict = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCAmelCase_ : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCAmelCase_ : List[str] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCAmelCase_ : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCAmelCase_ : Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCAmelCase_ : Optional[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] )
else:
raise ValueError(F"""Model name {model_name} not supported""" )
assert torch.allclose(_snake_case , _snake_case , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(_snake_case )
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub..." )
model.push_to_hub(_snake_case , organization="nielsr" )
processor.push_to_hub(_snake_case , organization="nielsr" )
slow_tokenizer.push_to_hub(_snake_case , organization="nielsr" )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
__lowerCamelCase = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 708
|
import operator
def _snake_case ( __snake_case , __snake_case = False , __snake_case = None ) -> list:
'''simple docstring'''
UpperCAmelCase_ : Optional[int] = operator.lt if reverse else operator.gt
UpperCAmelCase_ : int = solution or []
if not arr:
return solution
UpperCAmelCase_ : Union[str, Any] = [arr.pop(0 )]
for i, item in enumerate(__snake_case ):
if _operator(__snake_case , sublist[-1] ):
sublist.append(__snake_case )
arr.pop(__snake_case )
# merging sublist into solution list
if not solution:
solution.extend(__snake_case )
else:
while sublist:
UpperCAmelCase_ : Optional[int] = sublist.pop(0 )
for i, xx in enumerate(__snake_case ):
if not _operator(__snake_case , __snake_case ):
solution.insert(__snake_case , __snake_case )
break
else:
solution.append(__snake_case )
strand_sort(__snake_case , __snake_case , __snake_case )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
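    # Worked trace (a sketch): strand_sort([4, 3, 5, 1, 2]) peels the
    # increasing strand [4, 5] from the input, merges it into the empty
    # solution, then recurses on [3, 1, 2] (strand [3]) and on [1, 2]
    # (strand [1, 2]), merge-inserting each strand into the running solution.
    assert strand_sort([]) == []  # empty input comes back unchanged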
| 455
| 0
|
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , lowerCamelCase__ , lowerCamelCase__=13 , lowerCamelCase__=7 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=99 , lowerCamelCase__=32 , lowerCamelCase__=5 , lowerCamelCase__=4 , lowerCamelCase__=37 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=512 , lowerCamelCase__=16 , lowerCamelCase__=2 , lowerCamelCase__=0.02 , lowerCamelCase__=4 , ) -> Optional[int]:
lowercase__ : Dict = parent
lowercase__ : Tuple = batch_size
lowercase__ : List[Any] = seq_length
lowercase__ : Union[str, Any] = is_training
lowercase__ : int = use_attention_mask
lowercase__ : Dict = use_token_type_ids
lowercase__ : int = use_labels
lowercase__ : Dict = vocab_size
lowercase__ : List[Any] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : Optional[int] = num_attention_heads
lowercase__ : int = intermediate_size
lowercase__ : str = hidden_act
lowercase__ : List[Any] = hidden_dropout_prob
lowercase__ : Tuple = attention_probs_dropout_prob
lowercase__ : Tuple = max_position_embeddings
lowercase__ : List[str] = type_vocab_size
lowercase__ : List[str] = type_sequence_label_size
lowercase__ : str = initializer_range
lowercase__ : Any = num_choices
def UpperCAmelCase__( self ) -> Optional[int]:
lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ : Dict = None
if self.use_attention_mask:
lowercase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ : str = None
if self.use_token_type_ids:
lowercase__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase__ : Tuple = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def UpperCAmelCase__( self ) -> Dict:
lowercase__ : str = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : str = config_and_inputs
lowercase__ : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def UpperCAmelCase__( self ) -> Union[str, Any]:
lowercase__ : Any = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs
lowercase__ : List[Any] = True
lowercase__ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
lowercase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
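# Note (sketch): the decoder-mode inputs above turn on is_decoder in the config
# and supply encoder_hidden_states / encoder_attention_mask so the causal-LM
# tests also exercise the cross-attention path.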
@require_flax
# Copied from tests.models.roberta.test_modeling_flax_roberta.FlaxRobertaModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class _SCREAMING_SNAKE_CASE ( __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_a : Any = True
_a : Dict = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__( self ) -> List[Any]:
lowercase__ : List[str] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def UpperCAmelCase__( self ) -> int:
for model_class_name in self.all_model_classes:
lowercase__ : Optional[Any] = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase__ )
lowercase__ : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCamelCase__ )
@require_flax
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__( self ) -> Any:
lowercase__ : List[Any] = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase__ )
lowercase__ : Any = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
lowercase__ : List[str] = model(lowerCamelCase__ )[0]
lowercase__ : Any = [1, 11, 5_0265]
self.assertEqual(list(output.shape ) , lowerCamelCase__ )
# compare the actual values for a slice.
lowercase__ : Dict = np.array(
[[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
@slow
def UpperCAmelCase__( self ) -> Tuple:
lowercase__ : Any = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase__ )
lowercase__ : Dict = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa )
lowercase__ : str = model(lowerCamelCase__ )[0]
# compare the actual values for a slice.
lowercase__ : Optional[int] = np.array(
[[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase__ , atol=1E-4 ) )
| 200
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
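# A minimal sketch of the lazy-import idea behind _LazyModule (illustrative
# only; the class name below is hypothetical and the real implementation in
# transformers.utils is more featureful). On first attribute access the proxy
# imports the submodule listed in _import_structure, so importing the package
# itself stays cheap.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module("." + submodule, self.__name__)
                return getattr(module, attr)
        raise AttributeError(attr)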
| 200
| 1
|
'''simple docstring'''
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def lowerCAmelCase__ ( ):
a__ , a__ : List[Any] = 9, 14 # noqa: F841
a__ : Optional[int] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
a__ : Optional[Any] = defaultdict(snake_case__ )
    for nodea, nodeb, cost in edges:
        adjancency[nodea].append([nodeb, cost] )
        adjancency[nodeb].append([nodea, cost] )
a__ : List[Any] = mst(snake_case__ )
a__ : str = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
a__ : Dict = tuple(answer[:2] )
a__ : List[str] = tuple(edge[::-1] )
assert edge in result or reverse in result
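    # Note (sketch): each expected MST edge is checked in both orientations
    # because the graph is undirected, so the algorithm may legitimately
    # report either (u, v) or (v, u) for the same tree edge.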
| 717
|
'''simple docstring'''
from math import factorial
def __a ( lowerCAmelCase__ : int , lowerCAmelCase__ : int ):
# If either of the conditions are true, the function is being asked
# to calculate a factorial of a negative number, which is not possible
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(lowerCAmelCase__ ) // (factorial(lowerCAmelCase__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
f'fifty-two card deck is: {combinations(5_2, 5)}\n',
)
print(
'If a class of 40 students must be arranged into groups of',
f'4 for group projects, there are {combinations(4_0, 4)} ways',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
f'are {combinations(1_0, 3)} ways that first, second and',
'third place can be awarded.',
)
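    # Symmetry sanity check (a sketch): C(n, k) == C(n, n - k) for the
    # binomial coefficient, so both calls below must agree.
    assert combinations(5_2, 5) == combinations(5_2, 4_7)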
| 340
| 0
|
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> int:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = 1
SCREAMING_SNAKE_CASE__ : int = 3
SCREAMING_SNAKE_CASE__ : List[Any] = (32, 32)
SCREAMING_SNAKE_CASE__ : str = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_a )
return image
@property
def _a ( self ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def _a ( self ) -> Union[str, Any]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def _a ( self ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
return CLIPTextModel(_a )
@property
def _a ( self ) -> Optional[Any]:
"""simple docstring"""
def extract(*_a , **_a ):
class __a :
'''simple docstring'''
def __init__( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.ones([0] )
def _a ( self , _a ) -> List[Any]:
"""simple docstring"""
self.pixel_values.to(_a )
return self
return Out()
return extract
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_a , set_alpha_to_one=_a , )
SCREAMING_SNAKE_CASE__ : Any = self.dummy_vae
SCREAMING_SNAKE_CASE__ : int = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
        # use a plain DDIM scheduler for this test
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Any = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Any = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = output.images
SCREAMING_SNAKE_CASE__ : List[str] = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_a , )[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Optional[int] = PNDMScheduler(skip_prk_steps=_a )
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : str = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe([prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : Any = torch.Generator(device=_a ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(
[prompt] , generator=_a , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , return_dict=_a , )[0]
SCREAMING_SNAKE_CASE__ : str = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""" , safety_checker=_a )
assert isinstance(_a , _a )
assert isinstance(pipe.scheduler , _a )
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : List[Any] = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_a )
SCREAMING_SNAKE_CASE__ : str = StableDiffusionPipeline.from_pretrained(_a )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
SCREAMING_SNAKE_CASE__ : str = pipe("""example prompt""" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def _a ( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Any = PNDMScheduler(skip_prk_steps=_a )
SCREAMING_SNAKE_CASE__ : int = self.dummy_vae
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
SCREAMING_SNAKE_CASE__ : int = unet.half()
SCREAMING_SNAKE_CASE__ : Optional[Any] = vae.half()
SCREAMING_SNAKE_CASE__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : Dict = StableDiffusionPipeline(
unet=_a , scheduler=_a , vae=_a , text_encoder=_a , tokenizer=_a , safety_checker=_a , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class __a (unittest.TestCase):
'''simple docstring'''
def _a ( self ) -> Dict:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : str = (
"""portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"""
""" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"""
""" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"""
""" children from bahnhof zoo, detailed """
)
SCREAMING_SNAKE_CASE__ : int = 4_003_660_346
SCREAMING_SNAKE_CASE__ : int = 7
# without safety guidance (sld_guidance_scale = 0)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : str = output.images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        # with safety guidance (strong configuration)
SCREAMING_SNAKE_CASE__ : List[Any] = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : str = output.images
SCREAMING_SNAKE_CASE__ : Optional[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" , safety_checker=_a )
SCREAMING_SNAKE_CASE__ : Tuple = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : Dict = """padme amidala taking a bath artwork, safe for work, no nudity"""
SCREAMING_SNAKE_CASE__ : Any = 2_734_971_755
SCREAMING_SNAKE_CASE__ : Optional[Any] = 7
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : Dict = output.images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Any = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
SCREAMING_SNAKE_CASE__ : Tuple = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : str = output.images
SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : List[str] = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _a ( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
SCREAMING_SNAKE_CASE__ : List[Any] = (
"""the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."""
""" leyendecker"""
)
SCREAMING_SNAKE_CASE__ : Dict = 1_044_355_234
SCREAMING_SNAKE_CASE__ : Optional[Any] = 12
SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=0 , )
SCREAMING_SNAKE_CASE__ : List[str] = output.images
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
SCREAMING_SNAKE_CASE__ : Any = torch.manual_seed(_a )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe(
[prompt] , generator=_a , guidance_scale=_a , num_inference_steps=50 , output_type="""np""" , width=512 , height=512 , sld_guidance_scale=2_000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.images
SCREAMING_SNAKE_CASE__ : List[Any] = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Any = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
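        # Note (a reading of the keyword arguments above, not a spec of the
        # pipeline): sld_guidance_scale=0 disables safe-latent-diffusion
        # guidance entirely, while a large value (2_000) steers denoising away
        # from the unsafe concept once sld_warmup_steps have elapsed;
        # sld_threshold, sld_momentum_scale and sld_mom_beta shape the
        # momentum-smoothed strength of that correction.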
| 680
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
a :str = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a :str = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
a :Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 680
| 1
|
'''simple docstring'''
UpperCamelCase_ = {
"Pillow": "Pillow",
"accelerate": "accelerate>=0.11.0",
"compel": "compel==0.1.8",
"black": "black~=23.1",
"datasets": "datasets",
"filelock": "filelock",
"flax": "flax>=0.4.1",
"hf-doc-builder": "hf-doc-builder>=0.3.0",
"huggingface-hub": "huggingface-hub>=0.13.2",
"requests-mock": "requests-mock==1.10.0",
"importlib_metadata": "importlib_metadata",
"invisible-watermark": "invisible-watermark",
"isort": "isort>=5.5.4",
"jax": "jax>=0.2.8,!=0.3.2",
"jaxlib": "jaxlib>=0.1.65",
"Jinja2": "Jinja2",
"k-diffusion": "k-diffusion>=0.0.12",
"torchsde": "torchsde",
"note_seq": "note_seq",
"librosa": "librosa",
"numpy": "numpy",
"omegaconf": "omegaconf",
"parameterized": "parameterized",
"protobuf": "protobuf>=3.20.3,<4",
"pytest": "pytest",
"pytest-timeout": "pytest-timeout",
"pytest-xdist": "pytest-xdist",
"ruff": "ruff>=0.0.241",
"safetensors": "safetensors",
"sentencepiece": "sentencepiece>=0.1.91,!=0.1.92",
"scipy": "scipy",
"onnx": "onnx",
"regex": "regex!=2019.12.17",
"requests": "requests",
"tensorboard": "tensorboard",
"torch": "torch>=1.4",
"torchvision": "torchvision",
"transformers": "transformers>=4.25.1",
"urllib3": "urllib3<=2.0.0",
}
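# A small sketch of how such a pin table is consumed (hypothetical helper; the
# real setup tooling differs): keys are bare package names and values are the
# full version-pinned requirement strings, so building a requirements list for
# a subset of packages is a plain lookup.
def _pins_for(names):
    """Return the pinned requirement strings for the given package names."""
    return [UpperCamelCase_[name] for name in names]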
| 508
|
'''simple docstring'''
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/config.json",
# See all BART models at https://huggingface.co/models?filter=bart
}
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = '''bart'''
A : Dict = ['''past_key_values''']
A : Tuple = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self, A=50_265, A=1_024, A=12, A=4_096, A=16, A=12, A=4_096, A=16, A=0.0, A=0.0, A="gelu", A=1_024, A=0.1, A=0.0, A=0.0, A=0.02, A=0.0, A=False, A=True, A=3, A=1, A=0, A=2, A=True, A=2, A=2, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = vocab_size
SCREAMING_SNAKE_CASE : int = max_position_embeddings
SCREAMING_SNAKE_CASE : Dict = d_model
SCREAMING_SNAKE_CASE : int = encoder_ffn_dim
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = encoder_attention_heads
SCREAMING_SNAKE_CASE : Optional[int] = decoder_ffn_dim
SCREAMING_SNAKE_CASE : List[Any] = decoder_layers
SCREAMING_SNAKE_CASE : str = decoder_attention_heads
SCREAMING_SNAKE_CASE : Any = dropout
SCREAMING_SNAKE_CASE : str = attention_dropout
SCREAMING_SNAKE_CASE : Optional[Any] = activation_dropout
SCREAMING_SNAKE_CASE : List[Any] = activation_function
SCREAMING_SNAKE_CASE : str = init_std
SCREAMING_SNAKE_CASE : Optional[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE : Optional[int] = decoder_layerdrop
SCREAMING_SNAKE_CASE : List[str] = classifier_dropout
SCREAMING_SNAKE_CASE : int = use_cache
SCREAMING_SNAKE_CASE : Dict = encoder_layers
SCREAMING_SNAKE_CASE : List[Any] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=A, pad_token_id=A, bos_token_id=A, eos_token_id=A, is_encoder_decoder=A, decoder_start_token_id=A, forced_eos_token_id=A, **A, )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated', A ):
SCREAMING_SNAKE_CASE : str = self.bos_token_id
warnings.warn(
F"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'The config can simply be saved and uploaded again to be fixed.' )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : Tuple = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE : Union[str, Any] = {0: 'batch'}
SCREAMING_SNAKE_CASE : Any = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
SCREAMING_SNAKE_CASE : str = {0: 'batch', 1: 'decoder_sequence'}
SCREAMING_SNAKE_CASE : Any = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(A, direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
SCREAMING_SNAKE_CASE : int = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_layers
for i in range(A ):
SCREAMING_SNAKE_CASE : str = {0: 'batch', 2: 'past_sequence + sequence'}
SCREAMING_SNAKE_CASE : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
SCREAMING_SNAKE_CASE : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def UpperCamelCase_ ( self ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : List[Any] = super().outputs
else:
SCREAMING_SNAKE_CASE : Dict = super(A, self ).outputs
if self.use_past:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = self.num_layers
for i in range(A ):
SCREAMING_SNAKE_CASE : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
SCREAMING_SNAKE_CASE : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def UpperCamelCase_ ( self, A, A = -1, A = -1, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A, A, A, A, A )
# Generate decoder inputs
SCREAMING_SNAKE_CASE : Optional[int] = seq_length if not self.use_past else 1
SCREAMING_SNAKE_CASE : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A, A, A, A, A )
SCREAMING_SNAKE_CASE : List[str] = {F"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
SCREAMING_SNAKE_CASE : Optional[Any] = dict(**A, **A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Any = common_inputs['input_ids'].shape
SCREAMING_SNAKE_CASE : Tuple = common_inputs['decoder_input_ids'].shape[1]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[int] = self.num_attention_heads
SCREAMING_SNAKE_CASE : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Union[str, Any] = decoder_seq_length + 3
SCREAMING_SNAKE_CASE : int = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(A, A )], dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = self.num_layers
SCREAMING_SNAKE_CASE : Dict = min(A, A )
SCREAMING_SNAKE_CASE : List[str] = max(A, A ) - min_num_layers
SCREAMING_SNAKE_CASE : Any = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(A ):
common_inputs["past_key_values"].append(
(
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
torch.zeros(A ),
) )
# TODO: test this.
SCREAMING_SNAKE_CASE : Optional[int] = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(A, A ):
common_inputs["past_key_values"].append((torch.zeros(A ), torch.zeros(A )) )
return common_inputs
def UpperCamelCase_ ( self, A, A = -1, A = -1, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A, A, A, A, A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
SCREAMING_SNAKE_CASE : List[str] = seqlen + 2
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = self.num_layers
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : str = self.num_attention_heads
SCREAMING_SNAKE_CASE : int = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
SCREAMING_SNAKE_CASE : Optional[int] = common_inputs['attention_mask'].dtype
SCREAMING_SNAKE_CASE : Tuple = torch.cat(
[common_inputs['attention_mask'], torch.ones(A, A, dtype=A )], dim=1 )
SCREAMING_SNAKE_CASE : Union[str, Any] = [
(torch.zeros(A ), torch.zeros(A )) for _ in range(A )
]
return common_inputs
def UpperCamelCase_ ( self, A, A = -1, A = -1, A = False, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = compute_effective_axis_dimension(
A, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
SCREAMING_SNAKE_CASE : List[Any] = tokenizer.num_special_tokens_to_add(A )
SCREAMING_SNAKE_CASE : int = compute_effective_axis_dimension(
A, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=A )
# Generate dummy inputs according to compute batch and sequence
SCREAMING_SNAKE_CASE : int = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
SCREAMING_SNAKE_CASE : Tuple = dict(tokenizer(A, return_tensors=A ) )
return common_inputs
def UpperCamelCase_ ( self, A, A = -1, A = -1, A = False, A = None, ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
A, batch_size=A, seq_length=A, is_pair=A, framework=A )
elif self.task == "causal-lm":
SCREAMING_SNAKE_CASE : List[Any] = self._generate_dummy_inputs_for_causal_lm(
A, batch_size=A, seq_length=A, is_pair=A, framework=A )
else:
SCREAMING_SNAKE_CASE : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
A, batch_size=A, seq_length=A, is_pair=A, framework=A )
return common_inputs
def UpperCamelCase_ ( self, A, A, A, A ):
'''simple docstring'''
if self.task in ["default", "seq2seq-lm"]:
SCREAMING_SNAKE_CASE : List[str] = super()._flatten_past_key_values_(A, A, A, A )
else:
SCREAMING_SNAKE_CASE : Dict = super(A, self )._flatten_past_key_values_(
A, A, A, A )
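# A brief sketch of the cache geometry built by the dummy-input helpers above
# (hypothetical helper): every decoder layer caches key and value tensors of
# shape (batch, num_heads, past_len, hidden_size // num_heads), and the
# attention mask is padded with ones over the cached positions.
def _past_kv_sketch(batch, num_heads, past_len, hidden_size, num_layers):
    import torch

    shape = (batch, num_heads, past_len, hidden_size // num_heads)
    return [(torch.zeros(shape), torch.zeros(shape)) for _ in range(num_layers)]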
| 508
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_SCREAMING_SNAKE_CASE = {"configuration_swin": ["SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwinConfig", "SwinOnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwinForImageClassification",
"SwinForMaskedImageModeling",
"SwinModel",
"SwinPreTrainedModel",
"SwinBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSwinForImageClassification",
"TFSwinForMaskedImageModeling",
"TFSwinModel",
"TFSwinPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 181
|
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class InstructBlipProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
        qformer_tokenizer = BertTokenizerFast.from_pretrained("hf-internal-testing/tiny-random-bert")

        processor = InstructBlipProcessor(
            image_processor=image_processor, tokenizer=tokenizer, qformer_tokenizer=qformer_tokenizer
        )
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def get_qformer_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).qformer_tokenizer

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random channels-first uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_additional_features(self):
        processor = InstructBlipProcessor(
            tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor(), qformer_tokenizer=self.get_qformer_tokenizer()
        )
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = InstructBlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, GPT2Tokenizer)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)
        self.assertIsInstance(processor.qformer_tokenizer, PreTrainedTokenizerFast)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tokens = tokenizer(input_str, return_token_type_ids=False)
        encoded_tokens_qformer = qformer_tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tokens.keys():
            self.assertListEqual(encoded_tokens[key], encoded_processor[key])

        for key in encoded_tokens_qformer.keys():
            self.assertListEqual(encoded_tokens_qformer[key], encoded_processor["qformer_" + key])
    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        qformer_tokenizer = self.get_qformer_tokenizer()

        processor = InstructBlipProcessor(
            tokenizer=tokenizer, image_processor=image_processor, qformer_tokenizer=qformer_tokenizer
        )

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(
            list(inputs.keys()),
            ["input_ids", "attention_mask", "qformer_input_ids", "qformer_attention_mask", "pixel_values"],
        )
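For orientation, a hedged usage sketch of the processor under test (not part of the test file); the checkpoint name is one of the published InstructBLIP checkpoints and is illustrative only:

import numpy as np
from PIL import Image
from transformers import InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
inputs = processor(text="What is shown in this image?", images=image, return_tensors="pt")
print(sorted(inputs.keys()))
# ['attention_mask', 'input_ids', 'pixel_values', 'qformer_attention_mask', 'qformer_input_ids']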
| 181
| 1
|
import torch
from transformers import AutoModel
class FSNERModel(torch.nn.Module):
    def __init__(self, pretrained_model_name_or_path="sayef/fsner-bert-base-uncased"):
        super(FSNERModel, self).__init__()

        self.bert = AutoModel.from_pretrained(pretrained_model_name_or_path, return_dict=True)
        self.cos = torch.nn.CosineSimilarity(3, 1e-08)
        self.softmax = torch.nn.Softmax(dim=1)

    def BERT(self, **inputs):
        return self.bert(**inputs).last_hidden_state

    def VectorSum(self, token_embeddings):
        return token_embeddings.sum(2, keepdim=True)

    def Atten(self, q_rep, S_rep, T=1):
        return self.softmax(T * self.cos(q_rep, S_rep))

    def forward(self, W_query, W_supports):
        """
        Score each query token's probability of being an entity start/end, given
        tokenized query sentences (W_query) and tokenized support sentences
        (W_supports, carrying extra "sizes", "start_token_id" and "end_token_id" entries).
        """
        support_sizes = W_supports["sizes"].tolist()
        start_token_id = W_supports["start_token_id"].item()
        end_token_id = W_supports["end_token_id"].item()

        # The extra bookkeeping keys must not reach the BERT encoder.
        del W_supports["sizes"]
        del W_supports["start_token_id"]
        del W_supports["end_token_id"]

        q = self.BERT(**W_query)
        S = self.BERT(**W_supports)

        p_starts = None
        p_ends = None

        start_token_masks = W_supports["input_ids"] == start_token_id
        end_token_masks = W_supports["input_ids"] == end_token_id

        for i, size in enumerate(support_sizes):
            if i == 0:
                s = 0
            else:
                s = support_sizes[i - 1]

            # Embeddings of the entity start/end marker tokens within this query's supports
            s_start = S[s : s + size][start_token_masks[s : s + size]]
            s_end = S[s : s + size][end_token_masks[s : s + size]]

            p_start = torch.matmul(q[i], s_start.T).sum(1).softmax(0)
            p_end = torch.matmul(q[i], s_end.T).sum(1).softmax(0)

            if p_starts is not None:
                p_starts = torch.vstack((p_starts, p_start))
                p_ends = torch.vstack((p_ends, p_end))
            else:
                p_starts = p_start
                p_ends = p_end

        return p_starts, p_ends
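A hedged end-to-end sketch (not part of the original file). It assumes, following the published fsner demo, that the tokenizer ships [E] / [/E] entity-marker tokens and that the single query is scored against all supports (sizes == [len(supports)]); treat those details as assumptions rather than guarantees:

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("sayef/fsner-bert-base-uncased")

query = ["KEYTRUDA is a treatment option."]
supports = [
    "[E] pembrolizumab [/E] is an immunotherapy drug.",
    "Take [E] ibuprofen [/E] for mild pain.",
]

W_query = tokenizer(query, padding=True, return_tensors="pt")
W_supports = tokenizer(supports, padding=True, return_tensors="pt")
W_supports["sizes"] = torch.tensor([len(supports)])
W_supports["start_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[E]"))
W_supports["end_token_id"] = torch.tensor(tokenizer.convert_tokens_to_ids("[/E]"))

model = FSNERModel()
p_starts, p_ends = model(W_query, W_supports)  # per-token start/end probabilities for the query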
| 218
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "caidas/swin2sr-classicalsr-x2-64": (
        "https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json"
    ),
}


class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
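A short usage sketch (not in the original file) showing the config driving a model; Swin2SRModel is the plain encoder that consumes it:

from transformers import Swin2SRConfig, Swin2SRModel

# 4x super-resolution variant; every other field keeps the defaults above.
config = Swin2SRConfig(upscale=4)
model = Swin2SRModel(config)
print(config.num_layers, config.upscale)  # 6 4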
| 218
| 1
|