def merge_sort(collection: list) -> list:
    """Sort a list by repeatedly pulling out the min and max of what remains."""
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also works using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
from __future__ import annotations

from random import random


class Node:
    """Treap node: binary search tree by value, heap by random priority."""

    def __init__(self, value: int | None = None):
        self.value = value
        self.prior = random()
        self.left: Node | None = None
        self.right: Node | None = None

    def __repr__(self) -> str:
        from pprint import pformat

        if self.left is None and self.right is None:
            return f"'{self.value}: {self.prior:.5}'"
        else:
            return pformat({f"{self.value}: {self.prior:.5}": (self.left, self.right)}, indent=1)

    def __str__(self) -> str:
        value = str(self.value) + " "
        left = str(self.left or "")
        right = str(self.right or "")
        return value + left + right


def split(root: Node | None, value: int) -> tuple[Node | None, Node | None]:
    """Split the treap into (values <= value, values > value)."""
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            left, root.left = split(root.left, value)
            return left, root
        else:
            root.right, right = split(root.right, value)
            return root, right


def merge(left: Node | None, right: Node | None) -> Node | None:
    """Merge two treaps where every value in left is <= every value in right."""
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        left.right = merge(left.right, right)
        return left
    else:
        right.left = merge(left, right.left)
        return right


def insert(root: Node | None, value: int) -> Node | None:
    """Insert a value: split around it, then merge (left, new node, right)."""
    node = Node(value)
    left, right = split(root, value)
    return merge(merge(left, node), right)


def erase(root: Node | None, value: int) -> Node | None:
    """Erase all nodes with the given value: split it out and re-merge."""
    left, right = split(root, value - 1)
    _, right = split(right, value)
    return merge(left, right)


def inorder(root: Node | None) -> None:
    """Print the treap's values in sorted order."""
    if not root:  # None
        return
    else:
        inorder(root.left)
        print(root.value, end=",")
        inorder(root.right)


def interact_treap(root: Node | None, args: str) -> Node | None:
    """Process a line of commands: '+value' inserts, '-value' erases."""
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root, int(arg[1:]))
        elif arg[0] == "-":
            root = erase(root, int(arg[1:]))
        else:
            print("Unknown command")
    return root


def main() -> None:
    """Interactive loop: read commands until 'q'."""
    root = None
    print(
        "enter numbers to create a tree, + value to add value into treap, "
        "- value to erase all nodes with value. 'q' to quit. "
    )

    args = input()
    while args != "q":
        root = interact_treap(root, args)
        print(root)
        args = input()

    print("goodbye!")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
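# Illustrative, non-interactive use of the treap API above (values arbitrary;
# inorder prints the surviving values in sorted order):
def _demo_treap() -> None:
    root = None
    for value in (5, 3, 8, 3):
        root = insert(root, value)
    root = erase(root, 3)  # removes every node holding 3
    inorder(root)  # prints: 5,8,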
import os
import textwrap

import pyarrow as pa
import pytest

from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv

from ..utils import require_pil


@pytest.fixture
def csv_file(tmp_path):
    filename = tmp_path / "file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def malformed_csv_file(tmp_path):
    filename = tmp_path / "malformed_file.csv"
    data = textwrap.dedent(
        """\
        header1,header2
        1,2
        10,20,
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_image(tmp_path, image_file):
    filename = tmp_path / "csv_with_image.csv"
    data = textwrap.dedent(
        f"""\
        image
        {image_file}
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_label(tmp_path):
    filename = tmp_path / "csv_with_label.csv"
    data = textwrap.dedent(
        """\
        label
        good
        bad
        good
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


@pytest.fixture
def csv_file_with_int_list(tmp_path):
    filename = tmp_path / "csv_with_int_list.csv"
    data = textwrap.dedent(
        """\
        int_list
        1 2 3
        4 5 6
        7 8 9
        """
    )
    with open(filename, "w") as f:
        f.write(data)
    return str(filename)


def test_csv_generate_tables_raises_error_with_malformed_csv(csv_file, malformed_csv_file, caplog):
    csv = Csv()
    generator = csv._generate_tables([[csv_file, malformed_csv_file]])
    with pytest.raises(ValueError, match="Error tokenizing data"):
        for _ in generator:
            pass
    assert any(
        record.levelname == "ERROR"
        and "Failed to read file" in record.message
        and os.path.basename(malformed_csv_file) in record.message
        for record in caplog.records
    )


@require_pil
def test_csv_cast_image(csv_file_with_image):
    with open(csv_file_with_image, encoding="utf-8") as f:
        image_file = f.read().splitlines()[1]
    csv = Csv(encoding="utf-8", features=Features({"image": Image()}))
    generator = csv._generate_tables([[csv_file_with_image]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("image").type == Image()()
    generated_content = pa_table.to_pydict()["image"]
    assert generated_content == [{"path": image_file, "bytes": None}]


def test_csv_cast_label(csv_file_with_label):
    with open(csv_file_with_label, encoding="utf-8") as f:
        labels = f.read().splitlines()[1:]
    csv = Csv(encoding="utf-8", features=Features({"label": ClassLabel(names=["good", "bad"])}))
    generator = csv._generate_tables([[csv_file_with_label]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa_table.schema.field("label").type == ClassLabel(names=["good", "bad"])()
    generated_content = pa_table.to_pydict()["label"]
    assert generated_content == [ClassLabel(names=["good", "bad"]).str2int(label) for label in labels]


def test_csv_convert_int_list(csv_file_with_int_list):
    csv = Csv(encoding="utf-8", sep=",", converters={"int_list": lambda x: [int(i) for i in x.split()]})
    generator = csv._generate_tables([[csv_file_with_int_list]])
    pa_table = pa.concat_tables([table for _, table in generator])
    assert pa.types.is_list(pa_table.schema.field("int_list").type)
    generated_content = pa_table.to_pydict()["int_list"]
    assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """Output of a decoding step: the decoded sample tensor."""

    sample: torch.FloatTensor


class Encoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class Decoder(nn.Module):
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # "group" or "spatial"
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attention_head_dim=block_out_channels[-1],
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attention_head_dim=output_channel,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample


class VectorQuantizer(nn.Module):
    """Discrete codebook quantizer with optional post-hoc index remapping."""

    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j: (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients (straight-through estimator)
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q


class DiagonalGaussianDistribution(object):
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has the same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
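# Illustrative sketch of DiagonalGaussianDistribution above (shapes arbitrary):
# an encoder emits mean and logvar concatenated along the channel dimension.
def _demo_diagonal_gaussian() -> None:
    params = torch.randn(1, 8, 4, 4)  # 4 mean channels + 4 logvar channels
    dist = DiagonalGaussianDistribution(params)
    latent = dist.sample()  # reparameterized sample, shape (1, 4, 4, 4)
    kl = dist.kl()  # KL divergence to a standard normal, shape (1,)
    print(latent.shape, kl.shape)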
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def __snake_case ( __A ,__A = "cpu" ,__A = None ) -> None:
lowercase : Optional[int] = torch.load(__A ,map_location=__A )
for k, v in tqdm(state_dict.items() ):
if not isinstance(__A ,torch.Tensor ):
raise TypeError("""FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin""" )
lowercase : List[str] = v.half()
if save_path is None: # overwrite src_path
lowercase : List[str] = src_path
torch.save(__A ,__A )
if __name__ == "__main__":
fire.Fire(convert)
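# Illustrative invocations (file names are placeholders). `fire` exposes the
# function's arguments as command-line flags:
#
#     python fp16_conversion.py pytorch_model.bin --save_path model_fp16.bin
#
# or, equivalently, from Python:
#
#     convert("pytorch_model.bin", save_path="model_fp16.bin")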
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List

import git
import torch
from torch.utils.data import Dataset

from transformers import BartTokenizer, RagTokenizer, T5Tokenizer


def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line],
        max_length=max_length,
        padding="max_length" if pad_to_max_length else None,
        truncation=True,
        return_tensors=return_tensors,
        add_special_tokens=True,
        **extra_kw,
    )


def trim_batch(
    input_ids,
    pad_token_id,
    attention_mask=None,
):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])


class Seq2SeqDataset(Dataset):
    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index):
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch


logger = getLogger(__name__)


def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)


def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1


def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have a `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
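# Quick illustrative checks of the metric helpers above (strings arbitrary):
def _demo_metrics() -> None:
    # normalization lowercases and strips punctuation and articles
    assert exact_match_score("The Cat", "the cat!")
    print(f1_score("new york city", "york city"))  # token-overlap F1: 0.8
    print(calculate_exact_match(["the cat"], ["The Cat!"]))  # {'em': 1.0}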
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

OPEN_LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}


class OpenLlamaConfig(PretrainedConfig):
    model_type = "open-llama"

    def __init__(
        self,
        vocab_size=100000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_memory_efficient_attention=True,
        hidden_dropout_prob=0.1,
        attention_dropout_prob=0.1,
        use_stable_embedding=True,
        shared_input_output_embedding=True,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.use_cache = use_cache
        # the misspelled kwarg key is popped for backwards compatibility
        self.use_memory_efficient_attention = kwargs.pop(
            "use_memorry_efficient_attention", use_memory_efficient_attention
        )
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_dropout_prob = attention_dropout_prob
        self.use_stable_embedding = use_stable_embedding
        self.shared_input_output_embedding = shared_input_output_embedding
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        """Validate the `rope_scaling` configuration."""
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}")
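# Illustrative configuration exercising the validation above (values arbitrary):
# `rope_scaling` must contain exactly the keys "type" and "factor", with "type"
# in {"linear", "dynamic"} and a float factor strictly greater than 1.0, e.g.
#
#     config = OpenLlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})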
class RadixNode:
    def __init__(self, prefix: str = "", is_leaf: bool = False) -> None:
        # Mapping from the first character of the prefix of the node
        self.nodes = {}

        # A node will be a leaf if the tree contains its word
        self.is_leaf = is_leaf

        self.prefix = prefix

    def match(self, word: str) -> tuple[str, str, str]:
        """Compute the common substring of the node's prefix and a word."""
        x = 0
        for q, w in zip(self.prefix, word):
            if q != w:
                break
            x += 1

        return self.prefix[:x], self.prefix[x:], word[x:]

    def insert_many(self, words: list[str]) -> None:
        """Insert many words into the tree."""
        for word in words:
            self.insert(word)

    def insert(self, word: str) -> None:
        """Insert a word into the tree."""
        # Case 1: If the word is the prefix of the node
        # Solution: We set the current node as leaf
        if self.prefix == word:
            self.is_leaf = True

        # Case 2: The node has no edges that have a prefix to the word
        # Solution: We create an edge from the current node to a new one
        # containing the word
        elif word[0] not in self.nodes:
            self.nodes[word[0]] = RadixNode(prefix=word, is_leaf=True)

        else:
            incoming_node = self.nodes[word[0]]
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)

            # Case 3: The node prefix is equal to the matching
            # Solution: We insert remaining word on the next node
            if remaining_prefix == "":
                self.nodes[matching_string[0]].insert(remaining_word)

            # Case 4: The word is greater equal to the matching
            # Solution: Create a node in between both nodes, change
            # prefixes and add the new node for the remaining word
            else:
                incoming_node.prefix = remaining_prefix

                aux_node = self.nodes[matching_string[0]]
                self.nodes[matching_string[0]] = RadixNode(matching_string, False)
                self.nodes[matching_string[0]].nodes[remaining_prefix[0]] = aux_node

                if remaining_word == "":
                    self.nodes[matching_string[0]].is_leaf = True
                else:
                    self.nodes[matching_string[0]].insert(remaining_word)

    def find(self, word: str) -> bool:
        """Return whether the word is in the tree."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # This applies when the word and the prefix are equal
            elif remaining_word == "":
                return incoming_node.is_leaf
            # We have word remaining so we check the next node
            else:
                return incoming_node.find(remaining_word)

    def delete(self, word: str) -> bool:
        """Delete a word from the tree if it exists."""
        incoming_node = self.nodes.get(word[0], None)
        if not incoming_node:
            return False
        else:
            matching_string, remaining_prefix, remaining_word = incoming_node.match(word)
            # If there is remaining prefix, the word can't be on the tree
            if remaining_prefix != "":
                return False
            # We have word remaining so we check the next node
            elif remaining_word != "":
                return incoming_node.delete(remaining_word)
            else:
                # If it is not a leaf, we don't have to delete
                if not incoming_node.is_leaf:
                    return False
                else:
                    # We delete the node if no edges go from it
                    if len(incoming_node.nodes) == 0:
                        del self.nodes[word[0]]
                        # We merge the current node with its only child
                        if len(self.nodes) == 1 and not self.is_leaf:
                            merging_node = list(self.nodes.values())[0]
                            self.is_leaf = merging_node.is_leaf
                            self.prefix += merging_node.prefix
                            self.nodes = merging_node.nodes
                    # If there is more than 1 edge, we just mark it as non-leaf
                    elif len(incoming_node.nodes) > 1:
                        incoming_node.is_leaf = False
                    # If there is 1 edge, we merge it with its child
                    else:
                        merging_node = list(incoming_node.nodes.values())[0]
                        incoming_node.is_leaf = merging_node.is_leaf
                        incoming_node.prefix += merging_node.prefix
                        incoming_node.nodes = merging_node.nodes

                    return True

    def print_tree(self, height: int = 0) -> None:
        """Print the tree, one node per line, indented by height."""
        if self.prefix != "":
            print("-" * height, self.prefix, " (leaf)" if self.is_leaf else "")

        for value in self.nodes.values():
            value.print_tree(height + 1)


def test_trie() -> bool:
    words = "banana bananas bandana band apple all beast".split()
    root = RadixNode()
    root.insert_many(words)

    assert all(root.find(word) for word in words)
    assert not root.find("bandanas")
    assert not root.find("apps")
    root.delete("all")
    assert not root.find("all")
    root.delete("banana")
    assert not root.find("banana")
    assert root.find("bananas")

    return True


def pytests() -> None:
    assert test_trie()


def main() -> None:
    root = RadixNode()
    words = "banana bananas bandanas bandana band apple all beast".split()
    root.insert_many(words)

    print("Words:", words)
    print("Tree:")
    root.print_tree()


if __name__ == "__main__":
    main()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)


_import_structure = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    _import_structure["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]


if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
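# The `_LazyModule` above defers heavy framework imports: each submodule listed
# in `_import_structure` is imported only when one of its attributes is first
# accessed, e.g. (illustrative):
#
#     from transformers import LayoutLMv3Config  # config submodule only
#     from transformers import LayoutLMv3Model   # now triggers the torch submodule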
import gc
import unittest

from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax


if is_flax_available():
    import jax
    import jax.numpy as jnp
    from flax.jax_utils import replicate
    from flax.training.common_utils import shard


@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2

    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]
        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import torch
from ...models import AutoencoderKL, TransformeraDModel
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class lowerCAmelCase ( snake_case__ ):
'''simple docstring'''
def __init__( self :int , lowerCamelCase_ :TransformeraDModel , lowerCamelCase_ :AutoencoderKL , lowerCamelCase_ :KarrasDiffusionSchedulers , lowerCamelCase_ :Optional[Dict[int, str]] = None , ) -> int:
"""simple docstring"""
super().__init__()
self.register_modules(transformer=lowerCamelCase_ , vae=lowerCamelCase_ , scheduler=lowerCamelCase_ )
# create a imagenet -> id dictionary for easier use
UpperCamelCase__ = {}
if idalabel is not None:
for key, value in idalabel.items():
for label in value.split("," ):
UpperCamelCase__ = int(lowerCamelCase_ )
UpperCamelCase__ = dict(sorted(self.labels.items() ) )
def lowerCamelCase__ ( self :Tuple , lowerCamelCase_ :Union[str, List[str]] ) -> List[int]:
"""simple docstring"""
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
UpperCamelCase__ = list(lowerCamelCase_ )
for l in label:
if l not in self.labels:
raise ValueError(
f'{l} does not exist. Please make sure to select one of the following labels: \n {self.labels}.' )
return [self.labels[l] for l in label]
@torch.no_grad()
    def __call__(
        self,
        class_labels: List[int],
        guidance_scale: float = 4.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        batch_size = len(class_labels)
        latent_size = self.transformer.config.sample_size
        latent_channels = self.transformer.config.in_channels

        latents = randn_tensor(
            shape=(batch_size, latent_channels, latent_size, latent_size),
            generator=generator,
            device=self.device,
            dtype=self.transformer.dtype,
        )
        latent_model_input = torch.cat([latents] * 2) if guidance_scale > 1 else latents

        class_labels = torch.tensor(class_labels, device=self.device).reshape(-1)
        class_null = torch.tensor([1000] * batch_size, device=self.device)
        class_labels_input = torch.cat([class_labels, class_null], 0) if guidance_scale > 1 else class_labels

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)
        for t in self.progress_bar(self.scheduler.timesteps):
            if guidance_scale > 1:
                half = latent_model_input[: len(latent_model_input) // 2]
                latent_model_input = torch.cat([half, half], dim=0)
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            timesteps = t
            if not torch.is_tensor(timesteps):
                # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                # This would be a good case for the `match` statement (Python 3.10+)
                is_mps = latent_model_input.device.type == "mps"
                if isinstance(timesteps, float):
                    dtype = torch.float32 if is_mps else torch.float64
                else:
                    dtype = torch.int32 if is_mps else torch.int64
                timesteps = torch.tensor([timesteps], dtype=dtype, device=latent_model_input.device)
            elif len(timesteps.shape) == 0:
                timesteps = timesteps[None].to(latent_model_input.device)
            # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
            timesteps = timesteps.expand(latent_model_input.shape[0])

            # predict noise model_output
            noise_pred = self.transformer(
                latent_model_input, timestep=timesteps, class_labels=class_labels_input
            ).sample

            # perform guidance
            if guidance_scale > 1:
                eps, rest = noise_pred[:, :latent_channels], noise_pred[:, latent_channels:]
                cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)
                half_eps = uncond_eps + guidance_scale * (cond_eps - uncond_eps)
                eps = torch.cat([half_eps, half_eps], dim=0)
                noise_pred = torch.cat([eps, rest], dim=1)

            # learned sigma
            if self.transformer.config.out_channels // 2 == latent_channels:
                model_output, _ = torch.split(noise_pred, latent_channels, dim=1)
            else:
                model_output = noise_pred

            # compute previous image: x_t -> x_t-1
            latent_model_input = self.scheduler.step(model_output, t, latent_model_input).prev_sample

        if guidance_scale > 1:
            latents, _ = latent_model_input.chunk(2, dim=0)
        else:
            latents = latent_model_input

        latents = 1 / self.vae.config.scaling_factor * latents
        samples = self.vae.decode(latents).sample
        samples = (samples / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        samples = samples.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            samples = self.numpy_to_pil(samples)
        if not return_dict:
            return (samples,)
        return ImagePipelineOutput(images=samples)
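The guidance step in `__call__` blends the conditional and unconditional noise predictions. A minimal standalone sketch of that arithmetic (function and variable names here are illustrative, not part of the pipeline):

import torch

def classifier_free_guidance(cond_eps: torch.Tensor, uncond_eps: torch.Tensor, scale: float) -> torch.Tensor:
    # Push the prediction away from the unconditional direction by `scale`.
    return uncond_eps + scale * (cond_eps - uncond_eps)

# Sanity check: with scale=1.0 the guided prediction equals the conditional one.
cond, uncond = torch.randn(2, 4), torch.randn(2, 4)
assert torch.allclose(classifier_free_guidance(cond, uncond, 1.0), cond)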
| 304
| 0
|
"""simple docstring"""
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
'''simple docstring'''
    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=8,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=5,
        num_attention_heads=2,
        intermediate_size=36,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return MraConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = MraModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = MraForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest(ModelTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp(self):
        self.model_tester = MraModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MraConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="MRA does not output attentions")
    def test_attention_outputs(self):
        return
@require_torch
class MraModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = MraModel.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 256, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4")
        input_ids = torch.arange(256).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_masked_lm_long_input(self):
        model = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3")
        input_ids = torch.arange(4096).unsqueeze(0)
        with torch.no_grad():
            output = model(input_ids)[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
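The integration tests above compare a 3x3 slice of the model output against frozen reference values with an absolute tolerance. The comparison pattern in isolation (tensor values here are made up):

import torch

output = torch.tensor([[0.01401, 0.08299, -0.03809]])
expected = torch.tensor([[0.0140, 0.0830, -0.0381]])
# Passes because every element differs from its reference by less than atol.
assert torch.allclose(output, expected, atol=1e-4)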
| 82
|
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
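`_LazyModule` defers the heavy, torch-dependent imports until an attribute is first accessed. A minimal standalone version of the same idea, built on PEP 562 module-level `__getattr__` (illustrative, not the transformers implementation):

import importlib

_lazy_targets = {"json": ["dumps", "loads"]}

def __getattr__(name):
    # Import the owning module lazily on first attribute access.
    for module_name, attrs in _lazy_targets.items():
        if name in attrs:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")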
| 397
| 0
|
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = "\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")\n >>> pipe_prior.to(\"cuda\")\n\n >>> prompt = \"red cat, 4k photo\"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")\n >>> pipe.to(\"cuda\")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save(\"cat.png\")\n ```\n"
def get_new_h_w(h, w, scale_factor=8):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
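# Worked example: the function computes ceil(dim / scale_factor**2) * scale_factor,
# so with the default scale_factor=8, get_new_h_w(768, 768) -> (96, 96) and
# get_new_h_w(770, 768) -> (104, 96).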
class KandinskyPipeline(DiffusionPipeline):
    def __init__(
        self,
        text_encoder: MultilingualCLIP,
        tokenizer: XLMRobertaTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        movq: VQModel,
    ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            movq=movq,
        )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def prepare_latents(self, shape, dtype, device, generator, latents, scheduler):
        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)
        latents = latents * scheduler.init_noise_sigma
        return latents

    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
# get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(negative_prompt) is not type(prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])

        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")

        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        # guard: this pipeline registers no safety_checker, so only offload one if present
        if getattr(self, "safety_checker", None) is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
'''simple docstring'''
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module, "_hf_hook")
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_image_embeds: Union[torch.FloatTensor, List[torch.FloatTensor]],
        negative_prompt: Optional[Union[str, List[str]]] = None,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 100,
        guidance_scale: float = 4.0,
        num_images_per_prompt: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
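A standalone sketch of the embedding duplication used in `_encode_prompt` and `__call__` above: each prompt-level row is repeated once per requested image so the batch dimensions line up (shapes are illustrative):

import torch

prompt_embeds = torch.randn(2, 77, 768)  # two prompts
num_images_per_prompt = 3
expanded = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
assert expanded.shape == (6, 77, 768)  # rows 0-2 come from prompt 0, rows 3-5 from prompt 1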
| 721
|
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    # (the w2e_/e2w_/e2e_ target prefixes are reconstructed from the LUKE architecture; verify against the model)
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
if not (all(key.startswith("""entity_predictions""" ) or key.startswith("""lm_head""" ) for key in unexpected_keys )):
raise ValueError(
"""Unexpected keys"""
F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )
# Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)
def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index
    return entity_vocab
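# The expected entity_vocab.tsv layout is one tab-separated entry per line, e.g.
# (entries illustrative):
#   [PAD]<TAB>...
#   Ana Ivanovic<TAB>...
# Only the first column (the entity title) is used; the line index becomes the
# entity id, and whatever the second column holds is discarded here.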
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 569
| 0
|
import argparse
import json
import subprocess
def get_runner_status(target_runners, token):
    offline_runners = []

    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    o = output.stdout.decode("utf-8")
    status = json.loads(o)

    runners = status["runners"]
    for runner in runners:
        if runner["name"] in target_runners:
            if runner["status"] == "offline":
                offline_runners.append(runner)

    # save the result so we can report them on Slack
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if len(offline_runners) > 0:
        failed = "\n".join([x["name"] for x in offline_runners])
        raise ValueError(f"The following runners are offline:\n{failed}")
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--target_runners''',
default=None,
type=list_str,
required=True,
help='''Comma-separated list of runners to check status.''',
)
parser.add_argument(
'''--token''', default=None, type=str, required=True, help='''A token that has actions:read permission.'''
)
    args = parser.parse_args()
get_runner_status(args.target_runners, args.token)
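# Example invocation (script name, runner names and token are illustrative):
#   python get_runner_status.py --target_runners aws-g4dn-1,aws-g4dn-2 --token ghp_XXXX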
| 403
|
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--rembert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained RemBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
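# Example invocation (paths are illustrative):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./rembert/model.ckpt \
#       --rembert_config_file ./rembert/config.json \
#       --pytorch_dump_path ./rembert/pytorch_model.bin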
| 403
| 1
|
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
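# Note on the update rule above: with h = sigmoid(x @ theta), the gradient of the
# mean cross-entropy loss J(theta) is dJ/dtheta = x.T @ (h - y) / m (m = number of
# samples), so each step performs theta <- theta - alpha * x.T @ (h - y) / m.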
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
print('theta: ', theta) # printing the theta i.e our weights vector
    def predict_prob(x):
        return sigmoid_function(
            np.dot(x, theta)
        )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
plt.legend()
plt.show()
| 712
|
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 415
| 0
|
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "mgp-str": "https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"mgp-str": 27}
class MgpstrTokenizer(PreTrainedTokenizer):
lowerCAmelCase = VOCAB_FILES_NAMES
lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    def __init__(self, vocab_file, unk_token="[GO]", bos_token="[GO]", eos_token="[s]", pad_token="[GO]", **kwargs):
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.vocab = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.vocab.items()}
    @property
    def vocab_size(self):
        return len(self.vocab)

    def get_vocab(self):
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text):
        char_tokens = []
        for s in text:
            char_tokens.extend(s)
        return char_tokens

    def _convert_token_to_id(self, token):
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.vocab, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        return (vocab_file,)
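The tokenizer above is purely character-level. The core of `_tokenize` plus the id conversion, in isolation (toy vocabulary, not the real mgp-str one):

vocab = {"[GO]": 0, "a": 1, "b": 2}
decoder = {v: k for k, v in vocab.items()}

tokens = [c for c in "ab"]                            # one token per character
ids = [vocab.get(t, vocab["[GO]"]) for t in tokens]   # unknown characters fall back to [GO]
assert ids == [1, 2]
assert [decoder[i] for i in ids] == ["a", "b"]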
| 8
|
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BlenderbotTokenizer(PreTrainedTokenizer):
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ['''input_ids''', '''attention_mask''']
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
def snake_case__ ( self : Union[str, Any] , __a : List[int] , __a : Optional[List[int]] = None , __a : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__a , token_ids_a=__a , already_has_special_tokens=__a )
if token_ids_a is None:
return [1] + ([0] * len(__a )) + [1]
return [1] + ([0] * len(__a )) + [1, 1] + ([0] * len(__a )) + [1]
def snake_case__ ( self : Dict , __a : List[int] , __a : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase = [self.sep_token_id]
__UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def snake_case__ ( self : int , __a : Optional[int] , __a : int=False , **__a : Union[str, Any] ) -> Union[str, Any]:
__UpperCAmelCase = kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__a ) > 0 and not text[0].isspace()):
__UpperCAmelCase = ''' ''' + text
return (text, kwargs)
def snake_case__ ( self : List[str] , __a : List[int] , __a : Optional[List[int]] = None ) -> Dict:
return token_ids_a + [self.eos_token_id]
    def _build_conversation_input_ids( self : Optional[Any] , conversation : "Conversation" ) -> List[int]:
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(''' ''' + text )
            else:
                # Generated responses should contain them already.
                inputs.append(text )
        full_string = ''' '''.join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"""Trimmed input from conversation as it was longer than {self.model_max_length} tokens.""" )
        return input_ids
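    # Illustrative: for a conversation whose user turn is "hello" and whose
    # generated reply is "hi there", inputs becomes [' hello', 'hi there'] and
    # the string handed to self.encode is ' hello hi there'.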
| 262
| 0
|
from math import ceil
def solution(n: int = 1001) -> int:
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
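# Why 4 * odd**2 - 6 * even: for ring i the largest corner is odd**2 with
# odd = 2 * i + 1, and consecutive corners differ by even = 2 * i, so the four
# corners sum to 4 * odd**2 - 6 * even. E.g. for i = 1 the 3x3 ring's corners
# 3, 5, 7, 9 sum to 24 = 4 * 9 - 6 * 2.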
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number""")
| 308
|
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
_lowerCamelCase : Any = abspath(join(dirname(__file__), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_configure(config):
config.addinivalue_line(
'markers' , 'is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested' )
config.addinivalue_line(
'markers' , 'is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested' )
config.addinivalue_line('markers' , 'is_pipeline_test: mark test to run only when pipelines are tested' )
config.addinivalue_line('markers' , 'is_staging_test: mark test to run only in the staging environment' )
config.addinivalue_line('markers' , 'accelerate_tests: mark test that require accelerate' )
config.addinivalue_line('markers' , 'tool_tests: mark the tool tests that are run on their specific schedule' )
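# The markers registered above can then be applied to tests, e.g. (the test
# function name here is hypothetical):
#
#     @pytest.mark.is_staging_test
#     def test_push_to_hub():
#         ...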
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
def pytest_sessionfinish(session , exitstatus ):
    # If no tests are collected, pytest exits with code 5, which makes the CI fail.
    if exitstatus == 5:
        session.exitstatus = 0
# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("""IGNORE_RESULT""")
OutputChecker = doctest.OutputChecker
class CustomOutputChecker(OutputChecker):
    '''simple docstring'''

    def check_output( self , want , got , optionflags ):
        '''simple docstring'''
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self , want , got , optionflags )
doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
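# A hypothetical docstring using the custom flag registered above:
#
#     >>> train()  # doctest: +IGNORE_RESULT
#
# With IGNORE_RESULT set, CustomOutputChecker.check_output accepts any printed
# output, so only raised exceptions can fail the example.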
| 308
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 109
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 637
| 0
|
'''simple docstring'''
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
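# Toy example of the mask derivation above: with pad_token_id = 1,
# torch.tensor([[5, 7, 1]]).ne(1) gives tensor([[True, True, False]]), i.e.
# every non-pad position is attended to.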
class MaMaaaModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_labels=False , vocab_size=99 , hidden_size=16 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=20 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
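        # clamp(self.pad_token_id + 1) maps every sampled id at or below the pad
        # id up to pad_token_id + 1, e.g. with pad_token_id = 1 the row [0, 5, 1]
        # becomes [2, 5, 2], so no pad token can appear inside a sequence.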
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
        return config, inputs_dict
def _lowercase ( self : Any ):
"""simple docstring"""
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = MaMaaaModel(config=SCREAMING_SNAKE_CASE ).get_decoder().to(SCREAMING_SNAKE_CASE ).eval()
SCREAMING_SNAKE_CASE_ :Any = inputs_dict['input_ids']
SCREAMING_SNAKE_CASE_ :Optional[int] = inputs_dict['attention_mask']
SCREAMING_SNAKE_CASE_ :List[str] = inputs_dict['head_mask']
# first forward pass
SCREAMING_SNAKE_CASE_ :str = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , head_mask=SCREAMING_SNAKE_CASE , use_cache=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE_ :Dict = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE_ :Any = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE_ :Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE_ :int = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE_ :Optional[Any] = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )['last_hidden_state']
SCREAMING_SNAKE_CASE_ :Any = model(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE , past_key_values=SCREAMING_SNAKE_CASE )[
'last_hidden_state'
]
# select random slice
SCREAMING_SNAKE_CASE_ :Union[str, Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE_ :str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE_ :Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , atol=1E-2 ) )
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = MaMaaaModel(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
SCREAMING_SNAKE_CASE_ :Tuple = model(**SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE_ :Tuple = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ :Tuple = model.get_encoder()
encoder.save_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = MaMaaaEncoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = encoder(inputs_dict['input_ids'] , attention_mask=inputs_dict['attention_mask'] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model.get_decoder()
decoder.save_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = MaMaaaDecoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = decoder(
input_ids=inputs_dict['decoder_input_ids'] , attention_mask=inputs_dict['decoder_attention_mask'] , encoder_hidden_states=SCREAMING_SNAKE_CASE , encoder_attention_mask=inputs_dict['attention_mask'] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class MaMaaaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
{
'conversational': MaMaaaForConditionalGeneration,
'feature-extraction': MaMaaaModel,
'summarization': MaMaaaForConditionalGeneration,
'text2text-generation': MaMaaaForConditionalGeneration,
'translation': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip( self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
"""simple docstring"""
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
def _lowercase ( self : Optional[Any] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowercase ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ :Optional[int] = model_class(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[int] = model_class.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE )
self.assertEqual(info['missing_keys'] , [] )
def _lowercase ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*SCREAMING_SNAKE_CASE )
def _lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE_ :Optional[Any] = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
SCREAMING_SNAKE_CASE_ :Tuple = copy.deepcopy(self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ :Any = inputs['input_ids']
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE_ :Tuple = inputs['input_ids']
SCREAMING_SNAKE_CASE_ :Optional[Any] = inputs.get('decoder_input_ids' , SCREAMING_SNAKE_CASE )
del inputs["input_ids"]
inputs.pop('decoder_input_ids' , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[int] = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE_ :int = wte(SCREAMING_SNAKE_CASE )
else:
SCREAMING_SNAKE_CASE_ :Optional[Any] = wte(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = wte(SCREAMING_SNAKE_CASE )
with torch.no_grad():
model(**SCREAMING_SNAKE_CASE )[0]
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Dict = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ :str = input_dict['input_ids']
SCREAMING_SNAKE_CASE_ :Dict = input_ids.ne(1 ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :str = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE ).eval().to(SCREAMING_SNAKE_CASE )
if torch_device == "cuda":
model.half()
model.generate(SCREAMING_SNAKE_CASE , attention_mask=SCREAMING_SNAKE_CASE )
model.generate(num_beams=4 , do_sample=SCREAMING_SNAKE_CASE , early_stopping=SCREAMING_SNAKE_CASE , num_return_sequences=3 )
def _long_tensor(tok_lst ):
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )

TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class __lowerCAmelCase( unittest.TestCase ):
@cached_property
def _lowercase ( self : List[str] ):
"""simple docstring"""
return MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' )
def _lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :int = MaMaaaModel.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
SCREAMING_SNAKE_CASE_ :int = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
SCREAMING_SNAKE_CASE_ :Optional[int] = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ :Any = model(**SCREAMING_SNAKE_CASE )[0]
SCREAMING_SNAKE_CASE_ :List[str] = torch.Size((1, 11, 1_024) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# change to expected output here
SCREAMING_SNAKE_CASE_ :Tuple = torch.tensor(
[[-0.77_80, -0.16_76, 0.10_38], [-6.75_56, -1.39_92, 0.05_67], [-7.53_83, -0.59_20, -0.27_79]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def _lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[Any] = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE )
# change to intended input
SCREAMING_SNAKE_CASE_ :List[str] = _long_tensor([[128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38, 2]] )
SCREAMING_SNAKE_CASE_ :str = _long_tensor([[2, 128_028, 98, 12, 30_527, 2_732, 159, 7_755, 61_904, 39_144, 38]] )
SCREAMING_SNAKE_CASE_ :str = prepare_mam_aaa_inputs_dict(model.config , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ :Tuple = model(**SCREAMING_SNAKE_CASE )[0]
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
# change to expected output here
SCREAMING_SNAKE_CASE_ :List[str] = torch.tensor(
[[-1.04_48, -1.04_11, 3.79_92], [-3.21_91, -3.23_86, -1.34_51], [-3.62_10, -3.59_93, 0.49_25]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def _lowercase ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Dict = MaMaaaForConditionalGeneration.from_pretrained('facebook/m2m100_418M' ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = MaMaaaTokenizer.from_pretrained('facebook/m2m100_418M' , src_lang='fr' , tgt_lang='en' )
SCREAMING_SNAKE_CASE_ :List[Any] = [
'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement',
'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.',
'Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'
' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'
' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
SCREAMING_SNAKE_CASE_ :Optional[Any] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ :int = model.generate(
input_ids=dct['input_ids'].to(SCREAMING_SNAKE_CASE ) , attention_mask=dct['attention_mask'].to(SCREAMING_SNAKE_CASE ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('en' ) , )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = [
'The NSA case highlights the total absence of intelligence debate',
'I think there are two levels of response from the French government.',
'When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'
' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'
' communications in France.',
]
SCREAMING_SNAKE_CASE_ :Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE , skip_special_tokens=SCREAMING_SNAKE_CASE )
assert generated == expected_en
| 233
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.02 , backbone_featmap_shape=[1, 16, 4, 4] , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
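        # e.g. with the default image_size of 64, (64 // 32) ** 2 = 4 patches,
        # so seq_length is 5 once the [CLS] token is counted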
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
        """simple docstring"""
        backbone_config = {
            'global_padding': 'same',
            'layer_type': 'bottleneck',
            'depths': [3, 4, 9],
            'out_features': ['stage1', 'stage2', 'stage3'],
            'embedding_dynamic_padding': True,
            'hidden_sizes': [4, 8, 16, 32],
            'num_groups': 2,
        }
        return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        """simple docstring"""
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        """simple docstring"""
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        """simple docstring"""
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=37 )
def _lowercase ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds' )
def _lowercase ( self : str ):
"""simple docstring"""
pass
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ :Tuple = model_class(SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ :Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE , nn.Linear ) )
def _lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ :int = model_class(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ :List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ :int = ['pixel_values']
self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE )
def _lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ :Optional[int] = _config_zero_init(SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model_class(config=SCREAMING_SNAKE_CASE )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def _lowercase ( self : Dict ):
"""simple docstring"""
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ :Union[str, Any] = ViTHybridModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class __lowerCAmelCase( unittest.TestCase ):
@cached_property
def _lowercase ( self : Optional[Any] ):
"""simple docstring"""
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = self.default_image_processor
SCREAMING_SNAKE_CASE_ :Dict = prepare_img()
SCREAMING_SNAKE_CASE_ :str = image_processor(images=SCREAMING_SNAKE_CASE , return_tensors='pt' ).to(SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ :Union[str, Any] = model(**SCREAMING_SNAKE_CASE )
# verify the logits
SCREAMING_SNAKE_CASE_ :Any = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = torch.tensor([-1.90_90, -0.49_93, -0.23_89] ).to(SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE , atol=1E-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self ):
        """simple docstring"""
        image_processor = ViTHybridImageProcessor.from_pretrained('google/vit-hybrid-base-bit-384' )
        model = ViTHybridForImageClassification.from_pretrained('google/vit-hybrid-base-bit-384' , device_map='auto' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertTrue(model.config.id2label[predicted_class_idx] , 'tabby, tabby cat' )
| 233
| 1
|
'''simple docstring'''
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator
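# For each denominator d the best candidate numerator is floor(d * 3 / 7),
# nudged down by one when d is a multiple of 7 so the fraction stays strictly
# left of 3/7. Candidates are compared by cross-multiplication
# (p * max_denominator > d * max_numerator) to avoid floating-point error.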
if __name__ == "__main__":
print(solution(numerator=3, denominator=7, limit=1000000))
| 5
|
'''simple docstring'''
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
"""The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion"""
)
    LlamaTokenizerFast = None
INTERMEDIATE_SIZE_MAP = {
"""7B""": 11008,
"""13B""": 13824,
"""30B""": 17920,
"""65B""": 22016,
"""70B""": 28672,
}
NUM_SHARDS = {
"""7B""": 1,
"""7Bf""": 1,
"""13B""": 2,
"""13Bf""": 2,
"""30B""": 4,
"""65B""": 8,
"""70B""": 8,
"""70Bf""": 8,
}
def compute_intermediate_size(n , ffn_dim_multiplier=1 , multiple_of=256 ):
    return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
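# Worked example: for dim n = 4096 with the defaults above,
# int(8 * 4096 / 3) = 10922, and rounding up to the next multiple of 256 gives
# 11008 -- the "7B" entry in INTERMEDIATE_SIZE_MAP.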
def read_json(path ):
    with open(path , """r""" ) as f:
        return json.load(f )
def write_json(text , path ):
    with open(path , """w""" ) as f:
        json.dump(text , f )
def write_model(model_path , input_base_path , model_size , safe_serialization=True ):
    os.makedirs(model_path , exist_ok=True )
    tmp_model_path = os.path.join(model_path , """tmp""" )
    os.makedirs(tmp_model_path , exist_ok=True )
    params = read_json(os.path.join(input_base_path , """params.json""" ) )
    num_shards = NUM_SHARDS[model_size]
    n_layers = params["""n_layers"""]
    n_heads = params["""n_heads"""]
    n_heads_per_shard = n_heads // num_shards
    dim = params["""dim"""]
    dims_per_head = dim // n_heads
    base = 10_000.0
    inv_freq = 1.0 / (base ** (torch.arange(0 , dims_per_head , 2 ).float() / dims_per_head))
    if "n_kv_heads" in params:
        num_key_value_heads = params["""n_kv_heads"""]  # for GQA / MQA
        num_local_key_value_heads = n_heads_per_shard // num_key_value_heads
        key_value_dim = dim // num_key_value_heads
    else:  # compatibility with other checkpoints
        num_key_value_heads = n_heads
        num_local_key_value_heads = n_heads_per_shard
        key_value_dim = dim
# permute for sliced rotary
    def permute(w , n_heads=n_heads , dim1=dim , dim2=dim ):
        return w.view(n_heads , dim1 // n_heads // 2 , 2 , dim2 ).transpose(1 , 2 ).reshape(dim1 , dim2 )
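    # Meta's checkpoints apply rotary embeddings to interleaved channel pairs,
    # while the HF implementation rotates the first and second halves of each
    # head, so permute reorders the rows of each head: per head, row order
    # (0, 1, 2, 3, ...) becomes (0, 2, 4, ..., 1, 3, 5, ...).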
print(f'Fetching all parameters from the checkpoint at {input_base_path}.' )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
        loaded = torch.load(os.path.join(input_base_path , """consolidated.00.pth""" ) , map_location="""cpu""" )
else:
# Sharded
        loaded = [
            torch.load(os.path.join(input_base_path , f'consolidated.{i:02d}.pth' ) , map_location="""cpu""" )
            for i in range(num_shards )
        ]
    param_count = 0
    index_dict = {"""weight_map""": {}}
    for layer_i in range(n_layers ):
        filename = f'pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
            state_dict = {
f'model.layers.{layer_i}.self_attn.q_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wq.weight'] ),
f'model.layers.{layer_i}.self_attn.k_proj.weight': permute(
loaded[f'layers.{layer_i}.attention.wk.weight'] ),
f'model.layers.{layer_i}.self_attn.v_proj.weight': loaded[f'layers.{layer_i}.attention.wv.weight'],
f'model.layers.{layer_i}.self_attn.o_proj.weight': loaded[f'layers.{layer_i}.attention.wo.weight'],
f'model.layers.{layer_i}.mlp.gate_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w1.weight'],
f'model.layers.{layer_i}.mlp.down_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w2.weight'],
f'model.layers.{layer_i}.mlp.up_proj.weight': loaded[f'layers.{layer_i}.feed_forward.w3.weight'],
f'model.layers.{layer_i}.input_layernorm.weight': loaded[f'layers.{layer_i}.attention_norm.weight'],
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[f'layers.{layer_i}.ffn_norm.weight'],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_fordward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
            state_dict = {
f'model.layers.{layer_i}.input_layernorm.weight': loaded[0][
f'layers.{layer_i}.attention_norm.weight'
].clone(),
f'model.layers.{layer_i}.post_attention_layernorm.weight': loaded[0][
f'layers.{layer_i}.ffn_norm.weight'
].clone(),
}
            state_dict[f'model.layers.{layer_i}.self_attn.q_proj.weight'] = permute(
                torch.cat(
                    [
                        loaded[i][f'layers.{layer_i}.attention.wq.weight'].view(n_heads_per_shard , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(dim , dim ) )
            state_dict[f'model.layers.{layer_i}.self_attn.k_proj.weight'] = permute(
                torch.cat(
                    [
                        loaded[i][f'layers.{layer_i}.attention.wk.weight'].view(
                            num_local_key_value_heads , dims_per_head , dim )
                        for i in range(num_shards )
                    ] , dim=0 , ).reshape(key_value_dim , dim ) , num_key_value_heads , key_value_dim , dim , )
            state_dict[f'model.layers.{layer_i}.self_attn.v_proj.weight'] = torch.cat(
                [
                    loaded[i][f'layers.{layer_i}.attention.wv.weight'].view(
                        num_local_key_value_heads , dims_per_head , dim )
                    for i in range(num_shards )
                ] , dim=0 , ).reshape(key_value_dim , dim )
            state_dict[f'model.layers.{layer_i}.self_attn.o_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.attention.wo.weight'] for i in range(num_shards )] , dim=1 )
            state_dict[f'model.layers.{layer_i}.mlp.gate_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w1.weight'] for i in range(num_shards )] , dim=0 )
            state_dict[f'model.layers.{layer_i}.mlp.down_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w2.weight'] for i in range(num_shards )] , dim=1 )
            state_dict[f'model.layers.{layer_i}.mlp.up_proj.weight'] = torch.cat(
                [loaded[i][f'layers.{layer_i}.feed_forward.w3.weight'] for i in range(num_shards )] , dim=0 )
        state_dict[f'model.layers.{layer_i}.self_attn.rotary_emb.inv_freq'] = inv_freq
        for k, v in state_dict.items():
            index_dict["""weight_map"""][k] = filename
            param_count += v.numel()
        torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
    filename = f'pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin'
if model_size == "7B":
# Unsharded
        state_dict = {
"""model.embed_tokens.weight""": loaded["""tok_embeddings.weight"""],
"""model.norm.weight""": loaded["""norm.weight"""],
"""lm_head.weight""": loaded["""output.weight"""],
}
else:
        state_dict = {
"""model.norm.weight""": loaded[0]["""norm.weight"""],
"""model.embed_tokens.weight""": torch.cat(
[loaded[i]["""tok_embeddings.weight"""] for i in range(__lowerCamelCase )] , dim=1 ),
"""lm_head.weight""": torch.cat([loaded[i]["""output.weight"""] for i in range(__lowerCamelCase )] , dim=0 ),
}
    for k, v in state_dict.items():
        index_dict["""weight_map"""][k] = filename
        param_count += v.numel()
    torch.save(state_dict , os.path.join(tmp_model_path , filename ) )
# Write configs
_lowerCAmelCase = {"""total_size""": param_count * 2}
write_json(__lowerCamelCase , os.path.join(__lowerCamelCase , """pytorch_model.bin.index.json""" ) )
_lowerCAmelCase = params["""ffn_dim_multiplier"""] if """ffn_dim_multiplier""" in params else 1
_lowerCAmelCase = params["""multiple_of"""] if """multiple_of""" in params else 256
_lowerCAmelCase = LlamaConfig(
hidden_size=__lowerCamelCase , intermediate_size=compute_intermediate_size(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) , num_attention_heads=params["""n_heads"""] , num_hidden_layers=params["""n_layers"""] , rms_norm_eps=params["""norm_eps"""] , num_key_value_heads=__lowerCamelCase , )
config.save_pretrained(__lowerCamelCase )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("""Loading the checkpoint in a Llama model.""" )
    model = LlamaForCausalLM.from_pretrained(tmp_model_path , torch_dtype=torch.float16 , low_cpu_mem_usage=True )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("""Saving in the Transformers format.""" )
    model.save_pretrained(model_path , safe_serialization=safe_serialization )
    shutil.rmtree(tmp_model_path )
def write_tokenizer(tokenizer_path , input_tokenizer_path ):
    # Initialize the tokenizer based on the `spm` model
    tokenizer_class = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
    print(f'Saving a {tokenizer_class.__name__} to {tokenizer_path}.' )
    tokenizer = tokenizer_class(input_tokenizer_path )
    tokenizer.save_pretrained(tokenizer_path )
def main():
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--input_dir""" , help="""Location of LLaMA weights, which contains tokenizer.model and model folders""" , )
parser.add_argument(
"""--model_size""" , choices=["""7B""", """7Bf""", """13B""", """13Bf""", """30B""", """65B""", """70B""", """70Bf""", """tokenizer_only"""] , )
parser.add_argument(
"""--output_dir""" , help="""Location to write HF model and tokenizer""" , )
parser.add_argument("""--safe_serialization""" , type=__lowerCamelCase , help="""Whether or not to save using `safetensors`.""" )
_lowerCAmelCase = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
    spm_path = os.path.join(args.input_dir , """tokenizer.model""" )
    write_tokenizer(args.output_dir , spm_path )
if __name__ == "__main__":
main()
| 5
| 1
|
from ...utils import OptionalDependencyNotAvailable, is_note_seq_available, is_torch_available, is_transformers_available
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
TaFilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 240
|
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = '2.13.1'
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse('3.7'):
raise ImportWarning(
'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.'
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n'
'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.'
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 240
| 1
|
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def tearDown( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
    def dummy_image( self ):
        '''simple docstring'''
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
    def dummy_cond_unet( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
return model
@property
    def dummy_vae( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
    def dummy_text_encoder( self ):
        '''simple docstring'''
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(config )
    @property
    def dummy_extractor( self ):
        '''simple docstring'''
        def extract(*args , **kwargs ):
            class Out:
                def __init__( self ):
                    self.pixel_values = torch.ones([0] )

                def to( self , device ):
                    self.pixel_values.to(device )
                    return self

            return Out()

        return extract
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet
__SCREAMING_SNAKE_CASE = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=_lowerCAmelCase , set_alpha_to_one=_lowerCAmelCase , )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
__SCREAMING_SNAKE_CASE = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
__SCREAMING_SNAKE_CASE = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=_lowerCAmelCase , )[0]
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'cpu' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet
__SCREAMING_SNAKE_CASE = PNDMScheduler(skip_prk_steps=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionPipeline(
unet=_lowerCAmelCase , scheduler=_lowerCAmelCase , vae=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , safety_checker=_lowerCAmelCase , feature_extractor=self.dummy_extractor , )
__SCREAMING_SNAKE_CASE = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = 'A painting of a squirrel eating a burger'
__SCREAMING_SNAKE_CASE = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe([prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' )
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = torch.Generator(device=_lowerCAmelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] , generator=_lowerCAmelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='np' , return_dict=_lowerCAmelCase , )[0]
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-lms-pipe' , safety_checker=_lowerCAmelCase )
assert isinstance(_lowerCAmelCase , _lowerCAmelCase )
assert isinstance(pipe.scheduler , _lowerCAmelCase )
assert pipe.safety_checker is None
__SCREAMING_SNAKE_CASE = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(_lowerCAmelCase )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
__SCREAMING_SNAKE_CASE = pipe('example prompt' , num_inference_steps=2 ).images[0]
assert image is not None
    @unittest.skipIf(torch_device != 'cuda', 'This test requires a GPU')
    def test_stable_diffusion_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'A painting of a squirrel eating a burger'
        image = sd_pipe([prompt], num_inference_steps=2, output_type='np').images
        assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class SafeDiffusionPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_harm_safe_stable_diffusion(self):
        # the sld_* kwargs below are only accepted by the safe variant of the pipeline
        sd_pipe = StableDiffusionPipelineSafe.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
            ' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
            ' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
            ' children from bahnhof zoo, detailed '
        )
        seed = 4_003_660_346
        guidance_scale = 7
        # without safety guidance (sld_guidance_scale = 0)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2278, 0.2231, 0.2249, 0.2333, 0.2303, 0.1885, 0.2273, 0.2144, 0.2176]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        # with safety guidance (strong configuration)
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.2383, 0.2276, 0.236, 0.2192, 0.2186, 0.2053, 0.1971, 0.1901, 0.1719]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipelineSafe.from_pretrained('runwayml/stable-diffusion-v1-5', safety_checker=None)
        sd_pipe.scheduler = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = 'padme amidala taking a bath artwork, safe for work, no nudity'
        seed = 2_734_971_755
        guidance_scale = 7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.3502, 0.3622, 0.3396, 0.3642, 0.3478, 0.3318, 0.35, 0.3348, 0.3297]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = [0.5531, 0.5206, 0.4895, 0.5156, 0.5182, 0.4751, 0.4802, 0.4803, 0.4443]
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_nudity_safetychecker_safe_stable_diffusion(self):
        sd_pipe = StableDiffusionPipelineSafe.from_pretrained('runwayml/stable-diffusion-v1-5')
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        prompt = (
            'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
            ' leyendecker'
        )
        seed = 1_044_355_234
        guidance_scale = 12
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=0,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        # the safety checker blacks out the image entirely without safety guidance
        expected_slice = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-7
        generator = torch.manual_seed(seed)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=guidance_scale,
            num_inference_steps=50,
            output_type='np',
            width=512,
            height=512,
            sld_guidance_scale=2_000,
            sld_warmup_steps=7,
            sld_threshold=0.025,
            sld_momentum_scale=0.5,
            sld_mom_beta=0.7,
        )
        image = output.images
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5818, 0.6285, 0.6835, 0.6019, 0.625, 0.6754, 0.6096, 0.6334, 0.6561])
        assert image.shape == (1, 512, 512, 3)
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
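
# Illustrative sketch (ours, not the diffusers implementation): the sld_momentum_scale /
# sld_mom_beta knobs exercised above control an EMA-style smoothing of the per-step
# safety-guidance term. A stand-alone analogue with hypothetical names:
import numpy as np


def smoothed_guidance(guidance_terms, momentum_scale=0.5, mom_beta=0.7):
    """Apply exponential-moving-average momentum to a sequence of guidance vectors."""
    momentum = np.zeros_like(guidance_terms[0])
    smoothed = []
    for g in guidance_terms:
        g = g + momentum_scale * momentum  # inject the accumulated momentum
        momentum = mom_beta * momentum + (1 - mom_beta) * g  # update the running average
        smoothed.append(g)
    return smoothed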
| 148
|
'''simple docstring'''
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class StreamerTester(unittest.TestCase):
    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text
        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]
        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be
        # tested with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)
        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1
        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        # The streamer will timeout after 0.001 seconds, so an Empty exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
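
# A minimal, stand-alone usage sketch of TextIteratorStreamer outside the test harness
# (our example; it reuses the same tiny checkpoint as the tests above). Generation runs
# in a background thread while the caller consumes decoded text as it becomes available.
def demo_iterator_streamer():
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    inputs = tokenizer("Hello", return_tensors="pt")
    streamer = TextIteratorStreamer(tokenizer)
    Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 10, "streamer": streamer}).start()
    return "".join(streamer)  # iterating blocks until generation finishes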
| 18
| 0
|
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels
        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': BeitModel,
            'image-classification': BeitForImageClassification,
            'image-segmentation': BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason='BEiT does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image


@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained('microsoft/beit-base-patch16-224') if is_vision_available() else None

    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained('microsoft/beit-base-patch16-224-pt22k').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors='pt').pixel_values.to(torch_device)
        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))

    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-base-patch16-224').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)

    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained('microsoft/beit-large-patch16-224-pt22k-ft22k').to(
            torch_device
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)
        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse('9.0.0')
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained('microsoft/beit-base-finetuned-ade-640-640')
        model = model.to(torch_device)
        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
        ds = load_dataset('hf-internal-testing/fixtures_ade20k', split='test')
        image = Image.open(ds[0]['file'])
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
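
# Worked example of the sequence-length bookkeeping used by `BeitModelTester` above
# (our illustration): a 30x30 image with 2x2 patches yields (30 // 2) ** 2 = 225 patches,
# plus one [CLS] token, so hidden states have sequence length 226.
assert (30 // 2) ** 2 + 1 == 226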
| 720
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_clip": [
        "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPConfig",
        "CLIPOnnxConfig",
        "CLIPTextConfig",
        "CLIPVisionConfig",
    ],
    "processing_clip": ["CLIPProcessor"],
    "tokenization_clip": ["CLIPTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
        "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPModel",
        "CLIPPreTrainedModel",
        "CLIPTextModel",
        "CLIPTextModelWithProjection",
        "CLIPVisionModel",
        "CLIPVisionModelWithProjection",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
        "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFCLIPModel",
        "TFCLIPPreTrainedModel",
        "TFCLIPTextModel",
        "TFCLIPVisionModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
        "FlaxCLIPModel",
        "FlaxCLIPPreTrainedModel",
        "FlaxCLIPTextModel",
        "FlaxCLIPTextPreTrainedModel",
        "FlaxCLIPVisionModel",
        "FlaxCLIPVisionPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
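
# Illustration of what the `_LazyModule` indirection buys (our example, not part of this
# file): heavy submodules are imported only when an attribute is first accessed, so the
# initial package import stays cheap even with torch/tf/flax installed.
#
#     from transformers.models import clip  # fast: nothing heavy imported yet
#     cfg = clip.CLIPTextConfig()           # first attribute access triggers the real import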
| 336
| 0
|
"""simple docstring"""
def find_min(arr):
    """Partition `arr` into two subsets whose sums are as close as possible and
    return the minimum achievable difference between the two subset sums."""
    n = len(arr)
    s = sum(arr)
    # dp[i][j] is True when some subset of the first i elements sums to j
    dp = [[False for x in range(s + 1)] for y in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip element i
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take element i
    # the best split has one side as close to s / 2 as possible
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
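
if __name__ == "__main__":
    # Quick sanity check (our example): [1, 6, 11, 5] splits into {1, 5, 6} and {11},
    # so the minimum difference between the two subset sums is 12 - 11 = 1.
    assert find_min([1, 6, 11, 5]) == 1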
| 573
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
    'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
    'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
        'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MCTCTForCTC',
        'MCTCTModel',
        'MCTCTPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 357
| 0
|
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # reject negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # reject angles outside the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
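
# Worked example (ours): cos(60°)² = 0.25, so a 100-unit beam drops to 25 units.
assert math.isclose(malus_law(100.0, 60.0), 25.0)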
if __name__ == "__main__":
import doctest
doctest.testmod(name='''malus_law''')
| 295
|
"""simple docstring"""
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
"files", [
["full:README.md", "dataset_infos.json"],
["empty:README.md", "dataset_infos.json"],
["dataset_infos.json"],
["full:README.md"],
], )
def test_from_dir(files, tmp_path_factory):
    dataset_infos_dir = tmp_path_factory.mktemp("dset_infos_dir")
    if "full:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("---\ndataset_info:\n dataset_size: 42\n---")
    if "empty:README.md" in files:
        with open(dataset_infos_dir / "README.md", "w") as f:
            f.write("")
    # we want to support dataset_infos.json for backward compatibility
    if "dataset_infos.json" in files:
        with open(dataset_infos_dir / "dataset_infos.json", "w") as f:
            f.write('{"default": {"dataset_size": 42}}')
    dataset_infos = DatasetInfosDict.from_directory(dataset_infos_dir)
    assert dataset_infos
    assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
"dataset_info", [
DatasetInfo(),
DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=4_2, ),
], )
def test_dataset_info_dump_and_reload(tmp_path, dataset_info):
    tmp_path = str(tmp_path)
    dataset_info.write_to_directory(tmp_path)
    reloaded = DatasetInfo.from_directory(tmp_path)
    assert dataset_info == reloaded
    assert os.path.exists(os.path.join(tmp_path, "dataset_info.json"))
def test_dataset_info_to_yaml_dict():
    dataset_info = DatasetInfo(
        description="foo",
        citation="bar",
        homepage="https://foo.bar",
        license="CC0",
        features=Features({"a": Value("int32")}),
        post_processed={},
        supervised_keys=(),
        task_templates=[],
        builder_name="builder",
        config_name="config",
        version="1.0.0",
        splits=[{"name": "train", "num_examples": 42}],
        download_checksums={},
        download_size=1337,
        post_processing_size=442,
        dataset_size=1234,
        size_in_bytes=1337 + 442 + 1234,
    )
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert sorted(dataset_info_yaml_dict) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML)
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        assert key in dataset_info_yaml_dict
        assert isinstance(dataset_info_yaml_dict[key], (list, dict, int, str))
    dataset_info_yaml = yaml.safe_dump(dataset_info_yaml_dict)
    reloaded = yaml.safe_load(dataset_info_yaml)
    assert dataset_info_yaml_dict == reloaded


def test_dataset_info_to_yaml_dict_empty():
    dataset_info = DatasetInfo()
    dataset_info_yaml_dict = dataset_info._to_yaml_dict()
    assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
"dataset_infos_dict", [
DatasetInfosDict(),
DatasetInfosDict({"default": DatasetInfo()} ),
DatasetInfosDict({"my_config_name": DatasetInfo()} ),
DatasetInfosDict(
{
"default": DatasetInfo(
description="foo", features=Features({"a": Value("int32" )} ), builder_name="builder", config_name="config", version="1.0.0", splits=[{"name": "train"}], download_size=4_2, )
} ),
DatasetInfosDict(
{
"v1": DatasetInfo(dataset_size=4_2 ),
"v2": DatasetInfo(dataset_size=1_3_3_7 ),
} ),
], )
def test_dataset_infos_dict_dump_and_reload(tmp_path, dataset_infos_dict):
    tmp_path = str(tmp_path)
    dataset_infos_dict.write_to_directory(tmp_path)
    reloaded = DatasetInfosDict.from_directory(tmp_path)
    # the config_name of the dataset_infos_dict takes over the attribute
    for config_name, dataset_info in dataset_infos_dict.items():
        dataset_info.config_name = config_name
        # the yaml representation doesn't include fields like description or citation
        # so we just test that we can recover what we can from the yaml
        dataset_infos_dict[config_name] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict())
    assert dataset_infos_dict == reloaded
    if dataset_infos_dict:
        assert os.path.exists(os.path.join(tmp_path, "README.md"))
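
def test_dataset_info_round_trip_minimal(tmp_path):
    # Stand-alone distillation of the round-trip exercised above (our extra example): a
    # single DatasetInfo written to disk and read back preserves the fields it serializes.
    info = DatasetInfo(description="demo", dataset_size=42)
    info.write_to_directory(str(tmp_path))
    assert DatasetInfo.from_directory(str(tmp_path)).dataset_size == 42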
| 295
| 1
|
def solution(n: int = 1_000) -> int:
    """Sum, for 3 <= a <= n, of the maximum remainder when (a - 1)**k + (a + 1)**k is
    divided by a**2, using the closed form r_max(a) = 2 * a * ((a - 1) // 2)."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, n + 1))
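
# Brute-force cross-check of the closed form (our hypothetical helper): the residues of
# (a - 1)**k + (a + 1)**k mod a**2 repeat with period dividing 2 * a, so scanning
# k in [1, 2a] finds the true maximum remainder for each a.
def r_max(a: int) -> int:
    return max(((a - 1) ** k + (a + 1) ** k) % a**2 for k in range(1, 2 * a + 1))


assert all(2 * a * ((a - 1) // 2) == r_max(a) for a in range(3, 50))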
if __name__ == "__main__":
print(solution())
| 31
|
'''simple docstring'''
import sys
from typing import Tuple
import numpy as np
import torch
from PIL import Image
from torch import nn
from transformers.image_utils import PILImageResampling
from utils import img_tensorize
class ResizeShortestEdge:
    def __init__(self, short_edge_length, max_size=sys.maxsize):
        """Resize so the shortest edge falls in `short_edge_length`, capping the longest edge at `max_size`."""
        self.interp_method = "bilinear"
        self.max_size = max_size
        self.short_edge_length = short_edge_length

    def __call__(self, imgs):
        img_augs = []
        for img in imgs:
            h, w = img.shape[:2]
            # later: provide list and randomly choose index for resize
            size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
            if size == 0:
                return img
            scale = size * 1.0 / min(h, w)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size
            if max(newh, neww) > self.max_size:
                scale = self.max_size * 1.0 / max(newh, neww)
                newh = newh * scale
                neww = neww * scale
            neww = int(neww + 0.5)
            newh = int(newh + 0.5)
            if img.dtype == np.uint8:
                pil_image = Image.fromarray(img)
                pil_image = pil_image.resize((neww, newh), PILImageResampling.BILINEAR)
                img = np.asarray(pil_image)
            else:
                img = img.permute(2, 0, 1).unsqueeze(0)  # hw(c) -> nchw
                img = nn.functional.interpolate(
                    img, (newh, neww), mode=self.interp_method, align_corners=False
                ).squeeze(0)
            img_augs.append(img)
        return img_augs
class Preprocess:
    def __init__(self, cfg):
        self.aug = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        self.size_divisibility = cfg.SIZE_DIVISIBILITY
        self.pad_value = cfg.PAD_VALUE
        self.max_image_size = cfg.INPUT.MAX_SIZE_TEST
        self.device = cfg.MODEL.DEVICE
        self.pixel_std = torch.tensor(cfg.MODEL.PIXEL_STD).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.pixel_mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).to(self.device).view(len(cfg.MODEL.PIXEL_STD), 1, 1)
        self.normalizer = lambda x: (x - self.pixel_mean) / self.pixel_std

    def pad(self, images):
        max_size = tuple(max(s) for s in zip(*[img.shape for img in images]))
        image_sizes = [im.shape[-2:] for im in images]
        images = [
            nn.functional.pad(
                im,
                [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]],
                value=self.pad_value,
            )
            for size, im in zip(image_sizes, images)
        ]
        return torch.stack(images), torch.tensor(image_sizes)
    def __call__(self, images, single_image=False):
        with torch.no_grad():
            if not isinstance(images, list):
                images = [images]
            if single_image:
                assert len(images) == 1
            for i in range(len(images)):
                if isinstance(images[i], torch.Tensor):
                    images.insert(i, images.pop(i).to(self.device).float())
                elif not isinstance(images[i], torch.Tensor):
                    images.insert(
                        i,
                        torch.as_tensor(img_tensorize(images.pop(i), input_format=self.input_format))
                        .to(self.device)
                        .float(),
                    )
            # resize smallest edge
            raw_sizes = torch.tensor([im.shape[:2] for im in images])
            images = self.aug(images)
            # transpose images and convert to torch tensors
            # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images]
            # now normalize before pad to avoid useless arithmetic
            images = [self.normalizer(x) for x in images]
            # now pad them to do the following operations
            images, sizes = self.pad(images)
            # Normalize
            if self.size_divisibility > 0:
                raise NotImplementedError()
            # pad
            scales_yx = torch.true_divide(raw_sizes, sizes)
            if single_image:
                return images[0], sizes[0], scales_yx[0]
            else:
                return images, sizes, scales_yx
def _scale_box(boxes, scale_yx):
    boxes[:, 0::2] *= scale_yx[:, 1]
    boxes[:, 1::2] *= scale_yx[:, 0]
    return boxes


def _clip_box(tensor, box_size):
    assert torch.isfinite(tensor).all(), "Box tensor contains infinite or NaN!"
    h, w = box_size
    tensor[:, 0].clamp_(min=0, max=w)
    tensor[:, 1].clamp_(min=0, max=h)
    tensor[:, 2].clamp_(min=0, max=w)
    tensor[:, 3].clamp_(min=0, max=h)
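
if __name__ == "__main__":
    # Minimal smoke test for the resize transform (our example): the shortest edge of a
    # 480x640 uint8 image is scaled to 800, and the longest edge stays under the 1333 cap.
    dummy = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
    resize = ResizeShortestEdge((800, 800), max_size=1333)
    (resized,) = resize([dummy])
    print(resized.shape)  # (800, 1067, 3)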
| 310
| 0
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = feat_extract_first.mel_filters
        mel_second = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))
        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]
        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
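
    def test_zero_mean_unit_var_norm_numpy_equivalent(self):
        # The normalization above is plain per-example standardization; this stand-alone
        # numpy reimplementation (our sketch, ignoring attention-mask padding) behaves the
        # same way on a rescaled signal.
        audio = np.random.rand(1000) * 65535
        audio = (audio - audio.mean()) / np.sqrt(audio.var() + 1e-7)
        self.assertTrue(abs(audio.mean()) < 1e-3)
        self.assertTrue(abs(audio.var() - 1) < 1e-3)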
| 614
|
"""simple docstring"""
import baseaa
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> bytes:
"""simple docstring"""
return baseaa.baaencode(string.encode("utf-8" ) )
def __UpperCamelCase ( SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
return baseaa.baadecode(SCREAMING_SNAKE_CASE ).decode("utf-8" )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = """Hello World!"""
_SCREAMING_SNAKE_CASE = baseaa_encode(test)
print(encoded)
_SCREAMING_SNAKE_CASE = baseaa_decode(encoded)
print(decoded)
| 614
| 1
|
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNet2DConditionModel, UNet2DModel
do_only_config = False
do_only_weights = True
do_only_renaming = False
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)
    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]
        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))
        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
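
# Expected invocation (our sketch; the script name and paths are placeholders):
#
#     python convert_script.py --repo_path ./my-unet-repo --dump_path ./unused
#
# Note that `--dump_path` is declared required by the parser but never read; the script
# writes its output back into `repo_path/subfolder` via save_config / save_pretrained,
# and the result can be reloaded with, e.g., UNet2DModel.from_pretrained("./my-unet-repo").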
| 74
|
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class XmodConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'xmod'

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type='absolute',
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=('en_XX',),
        default_language=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    '''simple docstring'''

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """simple docstring"""
        if self.task == "multiple-choice":
            dynamic_axis = {0: 'batch', 1: 'choice', 2: 'sequence'}
        else:
            dynamic_axis = {0: 'batch', 1: 'sequence'}
        return OrderedDict(
            [
                ('input_ids', dynamic_axis),
                ('attention_mask', dynamic_axis),
            ]
        )
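# Minimal sketch: the defaults above give, e.g.,
#
#   config = XmodConfig()
#   print(config.hidden_size, config.languages)  # 768 ['en_XX']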
| 265
| 0
|
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    '''simple docstring'''

    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'}) - {'image', 'width', 'height'}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'}) - {'image'}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'latents'}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D'),
            up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D'),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule='scaled_linear',
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act='gelu',
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs['output_type'] = 'np'
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != 'cuda' or not is_xformers_available(),
        reason='XFormers attention is only available with CUDA and `xformers` installed',
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.')
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.')
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    '''simple docstring'''

    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL', torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device='cpu').manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to('cuda')

        prompt = 'Spiderman is surfing'

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type='pt').frames

        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
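# Hedged sketch of driving the pipeline directly with the dummy components above
# (tiny shapes, CPU-only; `components` and `video` as built in the helpers):
#
#   pipe = VideoToVideoSDPipeline(**components).to('cpu')
#   out = pipe(prompt='A painting of a squirrel eating a burger', video=video,
#              num_inference_steps=2, output_type='np')
#   print(out.frames[0].shape)  # (32, 32, 3) for this dummy UNet config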
| 101
|
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/git-base': 'https://huggingface.co/microsoft/git-base/resolve/main/config.json',
}


class GitVisionConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'git_vision_model'

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=16,
        hidden_act='quick_gelu',
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from GITConfig
        if config_dict.get('model_type') == 'git':
            config_dict = config_dict['vision_config']

        if 'model_type' in config_dict and hasattr(cls, 'model_type') and config_dict['model_type'] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class GitConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'git'

    def __init__(
        self,
        vision_config=None,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=6,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type='absolute',
        use_cache=True,
        tie_word_embeddings=False,
        bos_token_id=101,
        eos_token_id=102,
        num_image_with_embedding=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, pad_token_id=pad_token_id, **kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. initializing the GitVisionConfig with default values.')

        self.vision_config = GitVisionConfig(**vision_config)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.num_image_with_embedding = num_image_with_embedding

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
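# Minimal round-trip sketch for the configs above: nested vision settings
# survive to_dict() (attribute names follow the classes as defined here):
#
#   config = GitConfig(vision_config={'image_size': 112})
#   assert config.to_dict()['vision_config']['image_size'] == 112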
| 101
| 1
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """simple docstring"""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


class StableDiffusionSafetyChecker(PreTrainedModel):
    """simple docstring"""

    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__(self, config: CLIPConfig):
        """simple docstring"""
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)

    @torch.no_grad()
    def forward(self, clip_input, images):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['special_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img['special_scores'][concept_idx] > 0:
                    result_img['special_care'].append({concept_idx, result_img['special_scores'][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['concept_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img['concept_scores'][concept_idx] > 0:
                    result_img['bad_concepts'].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res['bad_concepts']) > 0 for res in result]

        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        """simple docstring"""
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
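# Quick sketch of cosine_distance above: L2-normalized rows times the transposed
# normalized rows give pairwise cosine similarities of shape [N, M]:
#
#   a, b = torch.randn(2, 8), torch.randn(3, 8)
#   print(cosine_distance(a, b).shape)  # torch.Size([2, 3])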
| 74
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    """simple docstring"""

    def test_input_types(self):
        """simple docstring"""
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        """simple docstring"""
        # one branch is a strict prefix of another, which the constraint rejects
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        """simple docstring"""
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
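# Minimal usage sketch for DisjunctiveConstraint outside the test harness
# (token ids are illustrative):
#
#   dc = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#   dc.update(1)
#   stepped, completed, reset = dc.update(4)  # completes the [1, 4] branch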
| 74
| 1
|
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=(
            "Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned"
            " Distillation"
        )
    )
    parser.add_argument("--model_type", default="bert", choices=["bert"])
    parser.add_argument("--model_name", default="bert-base-uncased", type=str)
    parser.add_argument("--dump_checkpoint", default="serialization_dir/tf_bert-base-uncased_0247911.pth", type=str)
    parser.add_argument("--vocab_transform", action="store_true")
    args = parser.parse_args()

    if args.model_type == "bert":
        model = BertForMaskedLM.from_pretrained(args.model_name)
        prefix = "bert"
    else:
        raise ValueError("args.model_type should be \"bert\".")

    state_dict = model.state_dict()
    compressed_sd = {}

    # NOTE: the student-side key names below follow DistilBERT's layout; the
    # originals were erased by the placeholder renaming and are reconstructed here.
    for w in ["word_embeddings", "position_embeddings"]:
        compressed_sd[f"distilbert.embeddings.{w}.weight"] = state_dict[f"{prefix}.embeddings.{w}.weight"]
    for w in ["weight", "bias"]:
        compressed_sd[f"distilbert.embeddings.LayerNorm.{w}"] = state_dict[f"{prefix}.embeddings.LayerNorm.{w}"]

    std_idx = 0
    for teacher_idx in [0, 2, 4, 7, 9, 11]:
        for w in ["weight", "bias"]:
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.q_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.k_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.v_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.attention.out_lin.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.sa_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin1.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.ffn.lin2.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
            ]
            compressed_sd[f"distilbert.transformer.layer.{std_idx}.output_layer_norm.{w}"] = state_dict[
                f"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
            ]
        std_idx += 1

    compressed_sd["vocab_projector.weight"] = state_dict["cls.predictions.decoder.weight"]
    compressed_sd["vocab_projector.bias"] = state_dict["cls.predictions.bias"]
    if args.vocab_transform:
        for w in ["weight", "bias"]:
            compressed_sd[f"vocab_transform.{w}"] = state_dict[f"cls.predictions.transform.dense.{w}"]
            compressed_sd[f"vocab_layer_norm.{w}"] = state_dict[f"cls.predictions.transform.LayerNorm.{w}"]

    print(f"N layers selected for distillation: {std_idx}")
    print(f"Number of params transferred for distillation: {len(compressed_sd.keys())}")

    print(f"Save transferred checkpoint to {args.dump_checkpoint}.")
    torch.save(compressed_sd, args.dump_checkpoint)
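# Hypothetical invocation (arguments follow the parser defined above; the
# checkpoint path shown is the parser's default):
#
#   python extract.py --model_type bert --model_name bert-base-uncased \
#       --dump_checkpoint serialization_dir/tf_bert-base-uncased_0247911.pth --vocab_transform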
| 372
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_bartpho import BartphoTokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
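# Sketch of what the _LazyModule indirection above buys: the sentencepiece-backed
# tokenizer module is only imported on first attribute access (package path assumed):
#
#   import transformers.models.bartpho as bartpho  # cheap: no sentencepiece import yet
#   tok_cls = bartpho.BartphoTokenizer             # triggers the real import here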
| 372
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''naver-clova-ix/donut-base''': '''https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json''',
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class DonutSwinConfig(PretrainedConfig):
    model_type = 'donut-swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act='gelu',
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
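# Worked check of the derived hidden_size above: with embed_dim=96 and four
# stages (len(depths) == 4), hidden_size = int(96 * 2 ** 3) = 768:
#
#   assert DonutSwinConfig().hidden_size == 768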
| 209
|
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        # trace the leaf operations of both modules with a forward pass, then
        # copy the source weights into the destination, operation by operation
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized

        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))

        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'Numbers of operations are different. Source module has {len(src_traced)} operations while'
                f' destination module has {len(dest_traced)}.'
            )

        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'Transfered from={src_m} to={dest_m}')


def convert_weight_and_push(name: str, config: ResNetConfig, save_directory: Path, push_to_hub: bool = True):
    '''simple docstring'''
    print(f'Converting {name}...')
    with torch.no_grad():
        # pretrained=True is assumed; the original flag was lost to the renaming
        from_model = timm.create_model(name, pretrained=True).eval()
        our_model = ResNetForImageClassification(config).eval()
        module_transfer = ModuleTransfer(src=from_model, dest=our_model)
        x = torch.randn((1, 3, 224, 224))
        module_transfer(x)

    assert torch.allclose(from_model(x), our_model(x).logits), "The model logits don't match the original one."

    checkpoint_name = f"resnet{'-'.join(name.split('resnet'))}"
    print(checkpoint_name)

    if push_to_hub:
        our_model.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message='Add model',
            use_temp_dir=True,
        )

        # we can use the convnext one
        image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
        image_processor.push_to_hub(
            repo_path_or_name=save_directory / checkpoint_name,
            commit_message='Add image processor',
            use_temp_dir=True,
        )

        print(f'Pushed {checkpoint_name}')


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    '''simple docstring'''
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1_000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(ResNetConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_config = {
        'resnet18': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[64, 128, 256, 512], layer_type='basic'
        ),
        'resnet26': ImageNetPreTrainedConfig(
            depths=[2, 2, 2, 2], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'
        ),
        'resnet34': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[64, 128, 256, 512], layer_type='basic'
        ),
        'resnet50': ImageNetPreTrainedConfig(
            depths=[3, 4, 6, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'
        ),
        'resnet101': ImageNetPreTrainedConfig(
            depths=[3, 4, 23, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'
        ),
        'resnet152': ImageNetPreTrainedConfig(
            depths=[3, 8, 36, 3], hidden_sizes=[256, 512, 1_024, 2_048], layer_type='bottleneck'
        ),
    }

    if model_name:
        convert_weight_and_push(model_name, names_to_config[model_name], save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(model_name, config, save_directory, push_to_hub)
    return config, expected_shape


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help=(
            'The name of the model you wish to convert, it must be one of the supported resnet* architecture,'
            ' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default=None,
        type=Path,
        required=True,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument(
        '--push_to_hub',
        default=True,
        type=bool,
        required=False,
        help='If True, push model and image processor to the hub.',
    )

    args = parser.parse_args()

    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
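# Hypothetical invocation converting a single checkpoint (flags as defined above):
#
#   python convert_resnet_to_pytorch.py --model_name resnet50 \
#       --pytorch_dump_folder_path ./resnet50-converted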
| 209
| 1
|
'''simple docstring'''
import base64

def baseaa_encode(string: str) -> bytes:
    # assumed base64; the encode function originally used an undefined name
    return base64.b64encode(string.encode("utf-8"))

def baseaa_decode(encoded: bytes) -> str:
    return base64.b64decode(encoded).decode("utf-8")

if __name__ == "__main__":
    test = 'Hello World!'
    encoded = baseaa_encode(test)
    print(encoded)
    decoded = baseaa_decode(encoded)
    print(decoded)
| 257
|
'''simple docstring'''
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class DebertaVaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = DebertaVaTokenizer
    rust_tokenizer_class = DebertaVaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        """simple docstring"""
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        """simple docstring"""
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        """simple docstring"""
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30_001)

    def test_vocab_size(self):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size, 30_000)

    def test_do_lower_case(self):
        """simple docstring"""
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        """simple docstring"""
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        """simple docstring"""
        pass

    def test_split_by_punct(self):
        """simple docstring"""
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct(self):
        """simple docstring"""
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_split_by_punct_false(self):
        """simple docstring"""
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct(self):
        """simple docstring"""
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_do_lower_case_false_split_by_punct_false(self):
        """simple docstring"""
        # fmt: off
        sequence = " \tHeLLo!how \n Are yoU? "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)

    def test_rust_and_python_full_tokenizers(self):
        """simple docstring"""
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        """simple docstring"""
        sequence = "This is a test"
        ids_target = [13, 1, 4_398, 25, 21, 1_289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)

        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)

        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)

        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

    def test_sequence_builders(self):
        """simple docstring"""
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_a = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id],
            encoded_pair,
        )

    @slow
    def test_tokenizer_integration(self):
        """simple docstring"""
        # fmt: off
_UpperCAmelCase : Optional[int] = {"input_ids": [[1, 3_9_8_6_7, 3_6, 1_9_3_9_0, 4_8_6, 2_7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 6_0_6_8_5, 1_2_2_5, 7, 3_5_0_5_2, 8_1_4_3_6, 1_8, 9_3_6_7, 1_6_8_9_9, 1_8, 1_5_9_3_7, 5_3, 5_9_4, 7_7_3, 1_8, 1_6_2_8_7, 3_0_4_6_5, 3_6, 1_5_9_3_7, 6, 4_1_1_3_9, 3_8, 3_6_9_7_9, 6_0_7_6_3, 1_9_1, 6, 3_4_1_3_2, 9_9, 6, 5_0_5_3_8, 3_9_0, 4_3_2_3_0, 6, 3_4_1_3_2, 2_7_7_9, 2_0_8_5_0, 1_4, 6_9_9, 1_0_7_2, 1_1_9_4, 3_6, 3_8_2, 1_0_9_0_1, 5_3, 7, 6_9_9, 1_0_7_2, 2_0_8_4, 3_6, 2_0_4_2_2, 6_3_0, 5_3, 1_9, 1_0_5, 3_0_4_9, 1_8_9_6, 1_0_5_3, 1_6_8_9_9, 1_5_0_6, 1_1, 3_7_9_7_8, 4_2_4_3, 7, 1_2_3_7, 3_1_8_6_9, 2_0_0, 1_6_5_6_6, 6_5_4, 6, 3_5_0_5_2, 8_1_4_3_6, 7, 5_5_6_3_0, 1_3_5_9_3, 4, 2], [1, 2_6, 1_5_0_1_1, 1_3, 6_6_7, 8, 1_0_5_3, 1_8, 2_3_6_1_1, 1_2_3_7, 7_2_3_5_6, 1_2_8_2_0, 3_4, 1_0_4_1_3_4, 1_2_0_9, 3_5, 1_3_3_1_3, 6_6_2_7, 2_1, 2_0_2, 3_4_7, 7, 1_6_4, 2_3_9_9, 1_1, 4_6, 4_4_8_5, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_2_3_2, 2_8_6_4, 1_5_7_8_5, 1_4_9_5_1, 1_0_5, 5, 8_5_8_1, 1_2_5_0, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase, model_name="microsoft/deberta-v2-xlarge", revision="ad6e42c1532ddf3a15c39246b63f5559d558b670"
        )
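# Minimal slow/fast parity sketch mirroring the assertions above (fixture path as
# defined at the top of the file):
#
#   tok = DebertaVaTokenizer(SAMPLE_VOCAB)
#   fast = DebertaVaTokenizerFast(SAMPLE_VOCAB)
#   sample = "I was born in 92000, and this is falsé."
#   assert tok.encode(sample) == fast.encode(sample)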
| 257
| 1
|
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """simple docstring"""

    # NOTE: the class name placeholder was lost; "MusicgenProcessor" is assumed
    # from the EncodecFeatureExtractor/T5Tokenizer pairing below.
    feature_extractor_class = 'EncodecFeatureExtractor'
    tokenizer_class = ('T5Tokenizer', 'T5TokenizerFast')

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs['input_values'] = audio_inputs['input_values']
            if "padding_mask" in audio_inputs:
                inputs['padding_mask'] = audio_inputs['padding_mask']
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop('audio', None)
        padding_mask = kwargs.pop('padding_mask', None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), 'constant', constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
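# Hedged usage sketch for the processor above (checkpoint id and sampling rate
# are illustrative; the class pairs an EncodecFeatureExtractor with a T5 tokenizer):
#
#   processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")
#   inputs = processor(text=["80s pop track"], audio=waveform, sampling_rate=32_000)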
| 13
|
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """simple docstring"""
    # creates a shape[0] x shape[1] nested list of random floats in [0, scale)
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        """simple docstring"""
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        """simple docstring"""
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    '''simple docstring'''

    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        """simple docstring"""
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        """simple docstring"""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'feat_extract.json')
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        """simple docstring"""
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding='max_length', return_tensors='np').input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors='np').input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors='np').input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors='np').input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
_lowerCAmelCase = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs]
_lowerCAmelCase = [x[: feature_extractor.n_samples] for x in speech_inputs]
_lowerCAmelCase = [np.asarray(UpperCAmelCase_ ) for speech_input in speech_inputs_truncated]
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='np' ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assertTrue(np.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 ) )
def __lowerCamelCase ( self : int ) -> str:
"""simple docstring"""
import torch
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = np.random.rand(100 , 32 ).astype(np.floataa )
_lowerCAmelCase = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
_lowerCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
_lowerCAmelCase = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def __lowerCamelCase ( self : List[str] , UpperCAmelCase_ : Any ) -> Dict:
"""simple docstring"""
_lowerCAmelCase = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
# automatic decoding with librispeech
_lowerCAmelCase = ds.sort('id' ).select(range(UpperCAmelCase_ ) )[:num_samples]['audio']
return [x["array"] for x in speech_samples]
def __lowerCamelCase ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
        # fmt: off
        _lowerCAmelCase = torch.tensor(
[
0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
-0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
] )
# fmt: on
_lowerCAmelCase = self._load_datasamples(1 )
_lowerCAmelCase = WhisperFeatureExtractor()
_lowerCAmelCase = feature_extractor(UpperCAmelCase_ , return_tensors='pt' ).input_features
self.assertEqual(input_features.shape , (1, 80, 3_000) )
self.assertTrue(torch.allclose(input_features[0, 0, :30] , UpperCAmelCase_ , atol=1E-4 ) )
def __lowerCamelCase ( self : Tuple ) -> int:
"""simple docstring"""
_lowerCAmelCase = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
_lowerCAmelCase = self._load_datasamples(1 )[0]
_lowerCAmelCase = ((audio - audio.min()) / (audio.max() - audio.min())) * 65_535 # Rescale to [0, 65535] to show issue
_lowerCAmelCase = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=UpperCAmelCase_ )[0]
self.assertTrue(np.all(np.mean(UpperCAmelCase_ ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCAmelCase_ ) - 1 ) < 1E-3 ) )
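# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the tests above): the last test checks that
# `zero_mean_unit_var_norm` recenters and rescales a waveform so that its mean
# is ~0 and its variance ~1. The core operation, restated with plain NumPy for
# a single unpadded 1-D input (the function name and eps are my own assumptions):
import numpy as np

def zero_mean_unit_var(x, eps=1e-7):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / np.sqrt(x.var() + eps)

# After normalization, abs(mean) and abs(var - 1) are both far below the 1e-3
# tolerance asserted above, regardless of the input's original scale.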
| 580
| 0
|
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = True , _UpperCamelCase = False ) -> str:
lowerCAmelCase_ = scheduler
lowerCAmelCase_ = optimizers if isinstance(_UpperCamelCase , (list, tuple) ) else [optimizers]
lowerCAmelCase_ = split_batches
lowerCAmelCase_ = step_with_optimizer
lowerCAmelCase_ = GradientState()
def __a ( self , *_UpperCamelCase , **_UpperCamelCase ) -> str:
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
lowerCAmelCase_ = AcceleratorState().num_processes
for _ in range(_UpperCamelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , "total_steps" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
else:
self.scheduler.step(*_UpperCamelCase , **_UpperCamelCase )
def __a ( self ) -> Optional[Any]:
return self.scheduler.get_last_lr()
def __a ( self ) -> Any:
return self.scheduler.state_dict()
def __a ( self , _UpperCamelCase ) -> Optional[Any]:
self.scheduler.load_state_dict(_UpperCamelCase )
def __a ( self ) -> Dict:
return self.scheduler.get_lr()
def __a ( self , *_UpperCamelCase , **_UpperCamelCase ) -> Tuple:
return self.scheduler.print_lr(*_UpperCamelCase , **_UpperCamelCase )
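# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the class above): the wrapper only steps the
# underlying scheduler when the wrapped optimizer actually stepped, and steps
# it `num_processes` times per training step when batches are not split. A
# hypothetical usage with plain PyTorch objects (the class name
# `AcceleratedScheduler` is an assumption of mine):
#
#     import torch
#
#     model = torch.nn.Linear(4, 2)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     wrapped = AcceleratedScheduler(scheduler, optimizer)
#
#     # inside the training loop, after optimizer.step():
#     wrapped.step()                  # no-op if the optimizer step was skipped
#     current_lr = wrapped.get_last_lr()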
| 279
|
from collections.abc import Iterable
from typing import Any
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase = None ) -> str:
lowerCAmelCase_ = value
lowerCAmelCase_ = None # Added in order to delete a node easier
lowerCAmelCase_ = None
lowerCAmelCase_ = None
def __repr__( self ) -> str:
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f"""{self.value}""": (self.left, self.right)} , indent=1 )
class _lowerCAmelCase :
def __init__( self , _UpperCamelCase = None ) -> Union[str, Any]:
lowerCAmelCase_ = root
def __str__( self ) -> str:
return str(self.root )
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> None:
if new_children is not None: # reset its kids
lowerCAmelCase_ = node.parent
if node.parent is not None: # reset its parent
            if self.is_right(_UpperCamelCase ): # If it is the right child
lowerCAmelCase_ = new_children
else:
lowerCAmelCase_ = new_children
else:
lowerCAmelCase_ = new_children
def __a ( self , _UpperCamelCase ) -> bool:
if node.parent and node.parent.right:
return node == node.parent.right
return False
def __a ( self ) -> bool:
return self.root is None
def __a ( self , _UpperCamelCase ) -> None:
lowerCAmelCase_ = Node(_UpperCamelCase ) # create a new Node
if self.empty(): # if Tree is empty
lowerCAmelCase_ = new_node # set its root
else: # Tree is not empty
lowerCAmelCase_ = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
lowerCAmelCase_ = new_node # We insert the new node in a leaf
break
else:
lowerCAmelCase_ = parent_node.left
else:
if parent_node.right is None:
lowerCAmelCase_ = new_node
break
else:
lowerCAmelCase_ = parent_node.right
lowerCAmelCase_ = parent_node
def __a ( self , *_UpperCamelCase ) -> None:
for value in values:
self.__insert(_UpperCamelCase )
def __a ( self , _UpperCamelCase ) -> Node | None:
if self.empty():
            raise IndexError("Tree is empty; there is nothing to search." )
else:
lowerCAmelCase_ = self.root
            # rely on short-circuit evaluation to avoid an AttributeError on None
while node is not None and node.value is not value:
lowerCAmelCase_ = node.left if value < node.value else node.right
return node
def __a ( self , _UpperCamelCase = None ) -> Node | None:
if node is None:
if self.root is None:
return None
lowerCAmelCase_ = self.root
if not self.empty():
while node.right is not None:
lowerCAmelCase_ = node.right
return node
def __a ( self , _UpperCamelCase = None ) -> Node | None:
        if node is None:
            if self.root is None:
                return None
            lowerCAmelCase_ = self.root
        if not self.empty():
            while node.left is not None:
                lowerCAmelCase_ = node.left
return node
def __a ( self , _UpperCamelCase ) -> None:
lowerCAmelCase_ = self.search(_UpperCamelCase ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(_UpperCamelCase , _UpperCamelCase )
elif node.left is None: # Has only right children
self.__reassign_nodes(_UpperCamelCase , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(_UpperCamelCase , node.left )
else:
lowerCAmelCase_ = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
lowerCAmelCase_ = (
tmp_node.value # type: ignore
                ) # Assign the successor's value to the node being deleted, keeping the tree structure
def __a ( self , _UpperCamelCase ) -> Iterable:
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def __a ( self , _UpperCamelCase=None ) -> Any:
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> None:
if node:
self.inorder(_UpperCamelCase , node.left )
arr.append(node.value )
self.inorder(_UpperCamelCase , node.right )
def __a ( self , _UpperCamelCase , _UpperCamelCase ) -> int:
lowerCAmelCase_ = []
self.inorder(_UpperCamelCase , _UpperCamelCase ) # append all values to list using inorder traversal
return arr[k - 1]
def lowerCamelCase__ ( __lowerCAmelCase : Node | None ):
"""simple docstring"""
lowerCAmelCase_ = []
if curr_node is not None:
lowerCAmelCase_ = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def lowerCamelCase__ ( ):
"""simple docstring"""
lowerCAmelCase_ = (8, 3, 6, 1, 10, 14, 13, 4, 7)
lowerCAmelCase_ = BinarySearchTree()
for i in testlist:
t.insert(__lowerCAmelCase )
# Prints all the elements of the list in order traversal
print(__lowerCAmelCase )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: " , t.get_max().value ) # type: ignore
print("Min Value: " , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(__lowerCAmelCase )
print(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
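# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the demo above): the defining invariant of a
# binary search tree is that an in-order traversal visits values in sorted
# order. A self-contained check of that property over a plain tuple tree
# (helper names are my own):

def _inorder_values(node):
    """Yield the values of a (value, left, right) tuple-tree in order."""
    if node is None:
        return
    value, left, right = node
    yield from _inorder_values(left)
    yield value
    yield from _inorder_values(right)

if __name__ == "__main__":
    _tree = (8, (3, None, None), (10, None, None))
    assert list(_inorder_values(_tree)) == [3, 8, 10]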
| 279
| 1
|
import datasets
_lowercase = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_lowercase = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into 14 different languages (some of them low-resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_lowercase = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def _A (UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ) ->Optional[int]:
'''simple docstring'''
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A ( datasets.Metric ):
def _snake_case (self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
"""references""": datasets.Value("""int64""" if self.config_name != """sts-b""" else """float32""" ),
} ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , )
def _snake_case (self , __magic_name__ , __magic_name__ ):
return {"accuracy": simple_accuracy(__magic_name__ , __magic_name__ )}
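# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the metric above): XNLI scoring is plain
# label accuracy, the mean of elementwise equality between predictions and
# references. With NumPy this is a one-liner (the function name is my own):
import numpy as np

def accuracy(preds, labels):
    return float((np.asarray(preds) == np.asarray(labels)).mean())

# accuracy([0, 1], [0, 1]) -> 1.0, matching the docstring example above.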
| 157
|
import math
def _A (UpperCamelCase : list , UpperCamelCase : int = 0 , UpperCamelCase : int = 0 ) ->list:
'''simple docstring'''
lowerCamelCase__ : Tuple = end or len(UpperCamelCase )
for i in range(UpperCamelCase , UpperCamelCase ):
lowerCamelCase__ : List[str] = i
lowerCamelCase__ : List[str] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
lowerCamelCase__ : Union[str, Any] = array[temp_index - 1]
temp_index -= 1
lowerCamelCase__ : Dict = temp_index_value
return array
def _A (UpperCamelCase : list , UpperCamelCase : int , UpperCamelCase : int ) ->None: # Max Heap
'''simple docstring'''
lowerCamelCase__ : Any = index
lowerCamelCase__ : int = 2 * index + 1 # Left Node
lowerCamelCase__ : str = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
lowerCamelCase__ : Tuple = left_index
if right_index < heap_size and array[largest] < array[right_index]:
lowerCamelCase__ : List[Any] = right_index
if largest != index:
lowerCamelCase__ ,lowerCamelCase__ : Any = array[largest], array[index]
heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase )
def _A (UpperCamelCase : list ) ->list:
'''simple docstring'''
lowerCamelCase__ : Optional[int] = len(UpperCamelCase )
for i in range(n // 2 , -1 , -1 ):
heapify(UpperCamelCase , UpperCamelCase , UpperCamelCase )
for i in range(n - 1 , 0 , -1 ):
lowerCamelCase__ ,lowerCamelCase__ : int = array[0], array[i]
heapify(UpperCamelCase , 0 , UpperCamelCase )
return array
def _A (UpperCamelCase : list , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ) ->int:
'''simple docstring'''
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def _A (UpperCamelCase : list , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ) ->int:
'''simple docstring'''
lowerCamelCase__ : Optional[Any] = low
lowerCamelCase__ : Optional[int] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
lowerCamelCase__ ,lowerCamelCase__ : Dict = array[j], array[i]
i += 1
def _A (UpperCamelCase : list ) ->list:
'''simple docstring'''
if len(UpperCamelCase ) == 0:
return array
lowerCamelCase__ : Any = 2 * math.ceil(math.loga(len(UpperCamelCase ) ) )
lowerCamelCase__ : Union[str, Any] = 16
return intro_sort(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase , UpperCamelCase )
def _A (UpperCamelCase : list , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ) ->list:
'''simple docstring'''
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(UpperCamelCase )
max_depth -= 1
lowerCamelCase__ : Tuple = median_of_a(UpperCamelCase , UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 )
lowerCamelCase__ : Optional[int] = partition(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
intro_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
lowerCamelCase__ : str = p
return insertion_sort(UpperCamelCase , UpperCamelCase , UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = input('''Enter numbers separated by a comma : ''').strip()
_lowercase = [float(item) for item in user_input.split(''',''')]
print(sort(unsorted))
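# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): introsort runs quicksort
# with a recursion-depth budget of 2*floor(log2(n)), falls back to heap sort
# once the budget is spent, and finishes small ranges (here, <= 16 elements)
# with insertion sort. A compact standalone restatement, using heapq for the
# heap-sort fallback (all names below are my own, not the file's API):
import heapq
import math

def introsort_sketch(a):
    """Sort the list `a` in place and return it."""
    def insertion(lo, hi):
        for i in range(lo + 1, hi):
            v, j = a[i], i
            while j > lo and a[j - 1] > v:
                a[j] = a[j - 1]
                j -= 1
            a[j] = v

    def _sort(lo, hi, depth):
        while hi - lo > 16:
            if depth == 0:                      # depth budget spent: heap sort
                chunk = a[lo:hi]
                heapq.heapify(chunk)
                a[lo:hi] = [heapq.heappop(chunk) for _ in range(len(chunk))]
                return
            depth -= 1
            pivot = a[(lo + hi) // 2]
            i, j = lo, hi - 1
            while True:                         # Hoare partition around pivot
                while a[i] < pivot:
                    i += 1
                while a[j] > pivot:
                    j -= 1
                if i >= j:
                    break
                a[i], a[j] = a[j], a[i]
                i += 1
                j -= 1
            _sort(lo, j + 1, depth)             # recurse on the left half,
            lo = j + 1                          # loop on the right half
        insertion(lo, hi)

    if a:
        _sort(0, len(a), 2 * math.floor(math.log2(len(a))))
    return a

# introsort_sketch([4, 2, 6, 8, 1]) -> [1, 2, 4, 6, 8]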
| 157
| 1
|
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=__SCREAMING_SNAKE_CASE)
class _a ( __SCREAMING_SNAKE_CASE):
"""simple docstring"""
# `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
UpperCamelCase__ = field(default="""summarization""" , metadata={"""include_in_asdict_even_if_is_default""": True})
UpperCamelCase__ = Features({"""text""": Value("""string""")})
UpperCamelCase__ = Features({"""summary""": Value("""string""")})
UpperCamelCase__ = """text"""
UpperCamelCase__ = """summary"""
@property
def lowercase__ ( self : Optional[int] )->Dict:
return {self.text_column: "text", self.summary_column: "summary"}
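# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the dataclass above): a task template maps
# dataset-specific column names onto the canonical ("text", "summary") pair.
# Assuming the class above is exposed as `Summarization` (a hypothetical name
# here), preparing a dataset whose columns are named differently would look
# roughly like:
#
#     from datasets import load_dataset
#
#     template = Summarization(text_column="article", summary_column="highlights")
#     # template.column_mapping == {"article": "text", "highlights": "summary"}
#     ds = load_dataset("cnn_dailymail", "3.0.0", split="train")
#     ds = ds.prepare_for_task(template)  # renames/casts columns accordingly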
| 705
|
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class _a :
"""simple docstring"""
@staticmethod
def lowercase__ ( *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[Any] )->List[str]:
pass
def lowercase ( _SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__A : Dict = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class _a ( unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def lowercase__ ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : str )->Tuple:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model=__UpperCamelCase , tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
_UpperCAmelCase = '''What is the placebo?'''
_UpperCAmelCase = [
{
'''image''': load_image(__UpperCamelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def lowercase__ ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] )->Any:
_UpperCAmelCase = dqa_pipeline(__UpperCamelCase , top_k=2 )
self.assertEqual(
__UpperCamelCase , [
[
{'''score''': ANY(__UpperCamelCase ), '''answer''': ANY(__UpperCamelCase ), '''start''': ANY(__UpperCamelCase ), '''end''': ANY(__UpperCamelCase )},
{'''score''': ANY(__UpperCamelCase ), '''answer''': ANY(__UpperCamelCase ), '''start''': ANY(__UpperCamelCase ), '''end''': ANY(__UpperCamelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : str )->Union[str, Any]:
_UpperCAmelCase = pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''How many cats are there?'''
_UpperCAmelCase = [
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 3_8, '''end''': 3_9},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 3_8, '''end''': 4_0},
]
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , __UpperCamelCase )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , __UpperCamelCase )
        # No text is detected in this image, so layoutlmv2 should fail
        # and return an empty answer.
_UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(__UpperCamelCase , [] )
        # We can optionally pass the words and bounding boxes directly
_UpperCAmelCase = '''./tests/fixtures/tests_samples/COCO/000000039769.png'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , words=__UpperCamelCase , boxes=__UpperCamelCase , top_k=2 )
self.assertEqual(__UpperCamelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : Union[str, Any] )->Optional[Any]:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def lowercase__ ( self : int )->Optional[int]:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=5_0 , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self : Dict )->Tuple:
_UpperCAmelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
_UpperCAmelCase = dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
]
]
* 2 , )
_UpperCAmelCase = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_UpperCAmelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 2_3, '''end''': 2_3},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def lowercase__ ( self : List[str] )->Any:
_UpperCAmelCase = AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=__UpperCamelCase )
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=__UpperCamelCase , revision='''3dc6de3''' , max_seq_len=5_0 , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
_UpperCAmelCase = dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
]
]
* 2 , )
_UpperCAmelCase = list(zip(*apply_tesseract(load_image(__UpperCamelCase ) , __UpperCamelCase , '''''' ) ) )
# This model should also work if `image` is set to None
_UpperCAmelCase = dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 1_6, '''end''': 1_6},
] , )
@slow
@require_torch
def lowercase__ ( self : int )->str:
_UpperCAmelCase = pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
_UpperCAmelCase = INVOICE_URL
_UpperCAmelCase = '''What is the invoice number?'''
_UpperCAmelCase = dqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(nested_simplify(__UpperCamelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def lowercase__ ( self : Tuple )->Union[str, Any]:
pass
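# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the tests above): outside a test harness,
# the pipeline is driven the same way the tests drive it -- build it, then
# pass an image (path, URL, or PIL.Image) plus a question. A hypothetical
# minimal invocation (the image path is a placeholder):
#
#     from transformers import pipeline
#
#     dqa = pipeline("document-question-answering",
#                    model="impira/layoutlm-document-qa")
#     answers = dqa(image="invoice.png",
#                   question="What is the invoice number?", top_k=2)
#     # -> [{"score": ..., "answer": ..., "start": ..., "end": ...}, ...]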
| 95
| 0
|
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
UpperCAmelCase : Dict = {
"bart": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"bert": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"bert-base-cased-finetuned-mrpc": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"dpr": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"gpt2": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlnet": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"xlm-roberta": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"transfo-xl": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"openai-gpt": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"roberta": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"layoutlm": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"roberta-large-mnli": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"camembert": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"flaubert": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"distilbert-base-distilled-squad": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"lxmert-visual-feature-encoder": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"ctrl": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"albert": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"t5": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"electra": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"wav2vec2": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False , __lowerCAmelCase=True ) -> Optional[Any]:
'''simple docstring'''
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase_ = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
lowercase_ = cached_file(lowerCamelCase_ , lowerCamelCase_ , force_download=not use_cached_models )
lowercase_ = config_class.from_json_file(lowerCamelCase_ )
lowercase_ = True
lowercase_ = True
print(F'''Building TensorFlow model from configuration: {config}''' )
lowercase_ = model_class(lowerCamelCase_ )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
lowercase_ = cached_file(
lowerCamelCase_ , lowerCamelCase_ , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
lowercase_ = load_pytorch_checkpoint_in_tfa_model(lowerCamelCase_ , lowerCamelCase_ )
if compare_with_pt_model:
lowercase_ = tf_model(tf_model.dummy_inputs , training=lowerCamelCase_ ) # build the network
lowercase_ = torch.load(lowerCamelCase_ , map_location="""cpu""" )
lowercase_ = pt_model_class.from_pretrained(
pretrained_model_name_or_path=lowerCamelCase_ , config=lowerCamelCase_ , state_dict=lowerCamelCase_ )
with torch.no_grad():
lowercase_ = pt_model(**pt_model.dummy_inputs )
lowercase_ = pto[0].numpy()
lowercase_ = tfo[0].numpy()
lowercase_ = np.amax(np.abs(np_pt - np_tf ) )
print(F'''Max absolute difference between models outputs {diff}''' )
assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}'''
# Save pytorch-model
print(F'''Save TensorFlow model to {tf_dump_path}''' )
tf_model.save_weights(lowerCamelCase_ , save_format="""h5""" )
def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , __lowerCAmelCase=None , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , __lowerCAmelCase=False , ) -> Tuple:
'''simple docstring'''
if args_model_type is None:
lowercase_ = list(MODEL_CLASSES.keys() )
else:
lowercase_ = [args_model_type]
for j, model_type in enumerate(lowerCamelCase_ , start=1 ):
print("""=""" * 1_00 )
print(F''' Converting model type {j}/{len(lowerCamelCase_ )}: {model_type}''' )
print("""=""" * 1_00 )
if model_type not in MODEL_CLASSES:
raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' )
lowercase_ = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
lowercase_ = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
lowercase_ = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(lowerCamelCase_ , lowerCamelCase_ ) , start=1 ):
print("""-""" * 1_00 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' )
continue
lowercase_ = model_shortcut_name
elif only_convert_finetuned_models:
print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' )
continue
print(
F''' Converting checkpoint {i}/{len(lowerCamelCase_ )}: {model_shortcut_name} - model_type {model_type}''' )
print("""-""" * 1_00 )
if config_shortcut_name in aws_config_map:
lowercase_ = cached_file(lowerCamelCase_ , lowerCamelCase_ , force_download=not use_cached_models )
else:
lowercase_ = config_shortcut_name
if model_shortcut_name in aws_model_maps:
lowercase_ = cached_file(lowerCamelCase_ , lowerCamelCase_ , force_download=not use_cached_models )
else:
lowercase_ = model_shortcut_name
if os.path.isfile(lowerCamelCase_ ):
lowercase_ = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=lowerCamelCase_ , pytorch_checkpoint_path=lowerCamelCase_ , config_file=lowerCamelCase_ , tf_dump_path=os.path.join(lowerCamelCase_ , model_shortcut_name + """-tf_model.h5""" ) , compare_with_pt_model=lowerCamelCase_ , )
if remove_cached_files:
os.remove(lowerCamelCase_ )
os.remove(lowerCamelCase_ )
if __name__ == "__main__":
UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file."
)
parser.add_argument(
"--model_type",
default=None,
type=str,
help=(
F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and "
"convert all the models from AWS."
),
)
parser.add_argument(
"--pytorch_checkpoint_path",
default=None,
type=str,
help=(
"Path to the PyTorch checkpoint path or shortcut name to download from AWS. "
"If not given, will download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--config_file",
default=None,
type=str,
help=(
"The config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture. If not given and "
"--pytorch_checkpoint_path is not given or is a shortcut name "
"use the configuration associated to the shortcut name on the AWS"
),
)
parser.add_argument(
"--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions."
)
parser.add_argument(
"--use_cached_models",
action="store_true",
help="Use cached models if possible instead of updating to latest checkpoint versions.",
)
parser.add_argument(
"--remove_cached_files",
action="store_true",
help="Remove pytorch models after conversion (save memory when converting in batches).",
)
parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.")
UpperCAmelCase : List[Any] = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
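# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the script above): a hypothetical
# invocation converting a single local BERT checkpoint and comparing the TF
# output against the PyTorch one (the script filename and all paths are
# placeholders, not values taken from the file):
#
#     python convert_pytorch_checkpoint_to_tf2.py \
#         --model_type bert \
#         --pytorch_checkpoint_path ./bert/pytorch_model.bin \
#         --config_file ./bert/config.json \
#         --tf_dump_path ./bert/tf_model.h5 \
#         --compare_with_pt_model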
| 567
|
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
@wraps(lowerCamelCase_ )
def _inner_fn(*lowerCamelCase_ , **lowerCamelCase_ ):
warnings.warn(
(f'''\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.''') , lowerCamelCase_ , )
return fn(*lowerCamelCase_ , **lowerCamelCase_ )
return _inner_fn
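# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the file above): a working restatement of
# the decorator, since the version above reads a name (`fn`) that its
# parameter list never binds. All names below are my own:
import warnings
from functools import wraps

def experimental(fn):
    """Warn on every call that `fn` may change without notice."""
    @wraps(fn)
    def _inner(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to "
            "breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner

@experimental
def new_api():
    return 42

# Calling new_api() returns 42 and emits a UserWarning first.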
| 378
| 0
|
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class _A ( unittest.TestCase ):
'''simple docstring'''
def __init__( self ,SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_=13 ,SCREAMING_SNAKE_CASE_=7 ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=True ,SCREAMING_SNAKE_CASE_=99 ,SCREAMING_SNAKE_CASE_=32 ,SCREAMING_SNAKE_CASE_=5 ,SCREAMING_SNAKE_CASE_=4 ,SCREAMING_SNAKE_CASE_=37 ,SCREAMING_SNAKE_CASE_="gelu" ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=0.1 ,SCREAMING_SNAKE_CASE_=512 ,SCREAMING_SNAKE_CASE_=16 ,SCREAMING_SNAKE_CASE_=2 ,SCREAMING_SNAKE_CASE_=0.02 ,SCREAMING_SNAKE_CASE_=4 ,):
'''simple docstring'''
snake_case : Optional[Any] = parent
snake_case : Any = batch_size
snake_case : str = seq_length
snake_case : Union[str, Any] = is_training
snake_case : Any = use_attention_mask
snake_case : str = use_token_type_ids
snake_case : Dict = use_labels
snake_case : List[str] = vocab_size
snake_case : Optional[Any] = hidden_size
snake_case : Tuple = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : Optional[int] = intermediate_size
snake_case : str = hidden_act
snake_case : Optional[int] = hidden_dropout_prob
snake_case : Optional[int] = attention_probs_dropout_prob
snake_case : str = max_position_embeddings
snake_case : Dict = type_vocab_size
snake_case : List[str] = type_sequence_label_size
snake_case : str = initializer_range
snake_case : int = num_choices
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case : Dict = None
if self.use_attention_mask:
snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : str = None
if self.use_token_type_ids:
snake_case : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case : List[Any] = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Any = self.prepare_config_and_inputs()
snake_case : Union[str, Any] = config_and_inputs
snake_case : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class _A ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = True
__lowerCamelCase : List[Any] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Dict = FlaxRoFormerModelTester(self )
@slow
def snake_case_ ( self ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
snake_case : Optional[int] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" ,from_pt=_a )
snake_case : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class _A ( unittest.TestCase ):
'''simple docstring'''
@slow
def snake_case_ ( self ):
'''simple docstring'''
snake_case : Union[str, Any] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
snake_case : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
snake_case : List[Any] = model(_a )[0]
snake_case : int = 50000
snake_case : str = (1, 6, vocab_size)
self.assertEqual(output.shape ,_a )
snake_case : Any = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] ,_a ,atol=1E-4 ) )
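# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the tests above): the slow test's core
# check -- run a Flax masked-LM head on token ids and inspect the logits --
# looks like this in isolation (model name as in the test; kept in comments
# because it downloads pretrained weights):
#
#     import jax.numpy as jnp
#     from transformers import FlaxRoFormerForMaskedLM
#
#     model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
#     logits = model(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
#     assert logits.shape == (1, 6, 50000)  # (batch, seq_len, vocab_size)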
| 705
|
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def lowercase ( __A : bytes , __A : int ) -> np.array:
'''simple docstring'''
snake_case : List[str] = f"""{sampling_rate}"""
snake_case : Union[str, Any] = """1"""
snake_case : List[str] = """f32le"""
snake_case : Optional[Any] = [
"""ffmpeg""",
"""-i""",
"""pipe:0""",
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
try:
with subprocess.Popen(__A , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
snake_case : str = ffmpeg_process.communicate(__A )
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to load audio files from filename""" ) from error
snake_case : int = output_stream[0]
snake_case : Tuple = np.frombuffer(__A , np.floataa )
if audio.shape[0] == 0:
raise ValueError("""Malformed soundfile""" )
return audio
def lowercase ( __A : int , __A : float , __A : str = "f32le" , ) -> Optional[Any]:
'''simple docstring'''
snake_case : Dict = f"""{sampling_rate}"""
snake_case : int = """1"""
if format_for_conversion == "s16le":
snake_case : Dict = 2
elif format_for_conversion == "f32le":
snake_case : Optional[Any] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
snake_case : Dict = platform.system()
if system == "Linux":
snake_case : List[str] = """alsa"""
snake_case : Union[str, Any] = """default"""
elif system == "Darwin":
snake_case : Optional[int] = """avfoundation"""
snake_case : str = """:0"""
elif system == "Windows":
snake_case : List[str] = """dshow"""
snake_case : Union[str, Any] = """default"""
snake_case : Union[str, Any] = [
"""ffmpeg""",
"""-f""",
format_,
"""-i""",
input_,
"""-ac""",
ac,
"""-ar""",
ar,
"""-f""",
format_for_conversion,
"""-fflags""",
"""nobuffer""",
"""-hide_banner""",
"""-loglevel""",
"""quiet""",
"""pipe:1""",
]
snake_case : List[str] = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
snake_case : Optional[Any] = _ffmpeg_stream(__A , __A )
for item in iterator:
yield item
def lowercase ( __A : int , __A : float , __A : Optional[int] = None , __A : Optional[Union[Tuple[float, float], float]] = None , __A : str = "f32le" , ) -> Optional[Any]:
'''simple docstring'''
if stream_chunk_s is not None:
snake_case : List[str] = stream_chunk_s
else:
snake_case : Tuple = chunk_length_s
snake_case : Optional[Any] = ffmpeg_microphone(__A , __A , format_for_conversion=__A )
if format_for_conversion == "s16le":
snake_case : List[Any] = np.intaa
snake_case : Dict = 2
elif format_for_conversion == "f32le":
snake_case : List[Any] = np.floataa
snake_case : Optional[Any] = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
snake_case : Tuple = chunk_length_s / 6
snake_case : str = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__A , (int, float) ):
snake_case : int = [stride_length_s, stride_length_s]
snake_case : Any = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
snake_case : int = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
snake_case : str = datetime.datetime.now()
snake_case : Tuple = datetime.timedelta(seconds=__A )
for item in chunk_bytes_iter(__A , __A , stride=(stride_left, stride_right) , stream=__A ):
# Put everything back in numpy scale
snake_case : List[str] = np.frombuffer(item["""raw"""] , dtype=__A )
snake_case : List[Any] = (
item["""stride"""][0] // size_of_sample,
item["""stride"""][1] // size_of_sample,
)
snake_case : Tuple = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
            # We are running behind real time; skip this chunk
continue
yield item
def lowercase ( __A : Optional[Any] , __A : int , __A : Tuple[int, int] , __A : bool = False ) -> List[Any]:
'''simple docstring'''
snake_case : Optional[Any] = b""""""
snake_case , snake_case : str = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
snake_case : List[Any] = 0
for raw in iterator:
acc += raw
if stream and len(__A ) < chunk_len:
snake_case : Dict = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__A ) >= chunk_len:
# We are flushing the accumulator
snake_case : str = (_stride_left, stride_right)
snake_case : str = {"""raw""": acc[:chunk_len], """stride""": stride}
if stream:
snake_case : Optional[Any] = False
yield item
snake_case : int = stride_left
snake_case : Union[str, Any] = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__A ) > stride_left:
snake_case : Dict = {"""raw""": acc, """stride""": (_stride_left, 0)}
if stream:
snake_case : Tuple = False
yield item
def lowercase ( __A : Optional[int] , __A : int ) -> List[str]:
'''simple docstring'''
    snake_case : List[str] = 2**24  # 16 MB read buffer
try:
with subprocess.Popen(__A , stdout=subprocess.PIPE , bufsize=__A ) as ffmpeg_process:
while True:
snake_case : Union[str, Any] = ffmpeg_process.stdout.read(__A )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("""ffmpeg was not found but is required to stream audio files from filename""" ) from error
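# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the utilities above): the chunking helper
# emits fixed-size windows that overlap by (stride_left, stride_right) bytes,
# so a downstream ASR model can discard edge effects. A simplified pure-Python
# restatement over a byte iterator (no streaming of partial chunks; names are
# my own):
def chunk_with_stride(iterator, chunk_len, stride_left, stride_right):
    if stride_left + stride_right >= chunk_len:
        raise ValueError("stride must be strictly smaller than chunk_len")
    acc = b""
    left = 0                                  # the first chunk has no left stride
    for raw in iterator:
        acc += raw
        while len(acc) >= chunk_len:
            yield {"raw": acc[:chunk_len], "stride": (left, stride_right)}
            acc = acc[chunk_len - stride_left - stride_right :]
            left = stride_left
    if len(acc) > left:                       # flush the final partial chunk
        yield {"raw": acc, "stride": (left, 0)}

# list(chunk_with_stride(iter([b"abcdefgh"]), 4, 1, 1)) yields chunks
# b"abcd", b"cdef", b"efgh", b"gh" with strides (0,1), (1,1), (1,1), (1,0).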
| 315
| 0
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__UpperCAmelCase : int = get_tests_dir('fixtures')
__UpperCAmelCase : Dict = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
__UpperCAmelCase : Optional[int] = get_tests_dir('fixtures/dummy-config.json')
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : str ) -> Any:
_a : List[str] = 0
def snake_case_ ( self : List[Any] ) -> Dict:
_a : List[str] = AutoFeatureExtractor.from_pretrained('''facebook/wav2vec2-base-960h''' )
self.assertIsInstance(__snake_case , __snake_case )
def snake_case_ ( self : Tuple ) -> Any:
_a : Dict = AutoFeatureExtractor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def snake_case_ ( self : str ) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdirname:
_a : Tuple = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_a : Tuple = AutoFeatureExtractor.from_pretrained(__snake_case ).to_dict()
config_dict.pop('''feature_extractor_type''' )
_a : Dict = WavaVecaFeatureExtractor(**__snake_case )
# save in new folder
model_config.save_pretrained(__snake_case )
config.save_pretrained(__snake_case )
_a : Any = AutoFeatureExtractor.from_pretrained(__snake_case )
# make sure private variable is not incorrectly saved
_a : Tuple = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(__snake_case , __snake_case )
def snake_case_ ( self : Optional[int] ) -> List[str]:
_a : str = AutoFeatureExtractor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
def snake_case_ ( self : Tuple ) -> Tuple:
with self.assertRaisesRegex(
__snake_case , '''bert-base is not a local folder and is not a valid model identifier''' ):
_a : str = AutoFeatureExtractor.from_pretrained('''bert-base''' )
def snake_case_ ( self : Any ) -> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
_a : Dict = AutoFeatureExtractor.from_pretrained(__snake_case , revision='''aaaaaa''' )
def snake_case_ ( self : Optional[Any] ) -> Union[str, Any]:
with self.assertRaisesRegex(
__snake_case , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
_a : Tuple = AutoFeatureExtractor.from_pretrained('''hf-internal-testing/config-no-model''' )
def snake_case_ ( self : List[str] ) -> Optional[int]:
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__snake_case ):
_a : int = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__snake_case ):
_a : Optional[Any] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__snake_case )
_a : str = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__snake_case )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__snake_case )
_a : List[Any] = AutoFeatureExtractor.from_pretrained(__snake_case , trust_remote_code=__snake_case )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
def snake_case_ ( self : int ) -> List[Any]:
try:
AutoConfig.register('''custom''' , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__snake_case ):
AutoFeatureExtractor.register(__snake_case , __snake_case )
# Now that the config is registered, it can be used as any other config with the auto-API
_a : int = CustomFeatureExtractor.from_pretrained(__snake_case )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__snake_case )
_a : Tuple = AutoFeatureExtractor.from_pretrained(__snake_case )
self.assertIsInstance(__snake_case , __snake_case )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def snake_case_ ( self : List[Any] ) -> Tuple:
class lowerCamelCase ( SCREAMING_SNAKE_CASE ):
UpperCAmelCase : str = True
try:
AutoConfig.register('''custom''' , __snake_case )
AutoFeatureExtractor.register(__snake_case , __snake_case )
# If remote code is not set, the default is to use local
_a : Optional[int] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a : Optional[int] = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__snake_case )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a : str = AutoFeatureExtractor.from_pretrained(
'''hf-internal-testing/test_dynamic_feature_extractor''' , trust_remote_code=__snake_case )
self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''' )
self.assertTrue(not hasattr(__snake_case , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
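# A minimal sketch of the register/round-trip pattern the tests above
# exercise, assuming the transformers Auto* registration API. MyConfig and
# MyFeatureExtractor are hypothetical classes invented for illustration;
# unlike the tests, this sketch does not undo the registration afterwards.
import tempfile

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig, Wav2Vec2FeatureExtractor


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyFeatureExtractor(Wav2Vec2FeatureExtractor):
    pass


AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)

# Round-trip through the auto-API, as the registration test above does.
with tempfile.TemporaryDirectory() as tmp_dir:
    MyFeatureExtractor().save_pretrained(tmp_dir)
    reloaded = AutoFeatureExtractor.from_pretrained(tmp_dir)
    assert isinstance(reloaded, MyFeatureExtractor)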
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNetaDConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
def snake_case_ ( self : Tuple , __snake_case : Tuple , __snake_case : List[Any] ) -> Union[str, Any]:
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__snake_case ) for s in shape] )}.npy"""
def snake_case_ ( self : Union[str, Any] ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case_ ( self : Any , __snake_case : Optional[int]=0 , __snake_case : Dict=(4, 4, 64, 64) , __snake_case : Optional[int]=False ) -> str:
_a : str = jnp.bfloataa if fpaa else jnp.floataa
_a : Any = jnp.array(load_hf_numpy(self.get_file_format(__snake_case , __snake_case ) ) , dtype=__snake_case )
return image
def snake_case_ ( self : int , __snake_case : Union[str, Any]=False , __snake_case : List[str]="CompVis/stable-diffusion-v1-4" ) -> Tuple:
_a : Optional[int] = jnp.bfloataa if fpaa else jnp.floataa
_a : Any = '''bf16''' if fpaa else None
_a , _a : List[str] = FlaxUNetaDConditionModel.from_pretrained(
__snake_case , subfolder='''unet''' , dtype=__snake_case , revision=__snake_case )
return model, params
def snake_case_ ( self : List[str] , __snake_case : Optional[Any]=0 , __snake_case : str=(4, 77, 768) , __snake_case : Tuple=False ) -> Union[str, Any]:
_a : Optional[Any] = jnp.bfloataa if fpaa else jnp.floataa
_a : List[Any] = jnp.array(load_hf_numpy(self.get_file_format(__snake_case , __snake_case ) ) , dtype=__snake_case )
return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2_323, -0.1_304, 0.0_813, -0.3_093, -0.0_919, -0.1_571, -0.1_125, -0.5_806]],
[17, 0.55, [-0.0_831, -0.2_443, 0.0_901, -0.0_919, 0.3_396, 0.0_103, -0.3_743, 0.0_701]],
[8, 0.89, [-0.4_863, 0.0_859, 0.0_875, -0.1_658, 0.9_199, -0.0_114, 0.4_839, 0.4_639]],
[3, 1000, [-0.5_649, 0.2_402, -0.5_518, 0.1_248, 1.1_328, -0.2_443, -0.0_325, -1.0_078]],
# fmt: on
] )
def snake_case_ ( self : Any , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Dict ) -> int:
_a , _a : Optional[int] = self.get_unet_model(model_id='''CompVis/stable-diffusion-v1-4''' , fpaa=__snake_case )
_a : Optional[Any] = self.get_latents(__snake_case , fpaa=__snake_case )
_a : Any = self.get_encoder_hidden_states(__snake_case , fpaa=__snake_case )
_a : int = model.apply(
{'''params''': params} , __snake_case , jnp.array(__snake_case , dtype=jnp.intaa ) , encoder_hidden_states=__snake_case , ).sample
assert sample.shape == latents.shape
_a : str = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_a : int = jnp.array(__snake_case , dtype=jnp.floataa )
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__snake_case , __snake_case , atol=1E-2 )
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1_514, 0.0_807, 0.1_624, 0.1_016, -0.1_896, 0.0_263, 0.0_677, 0.2_310]],
[17, 0.55, [0.1_164, -0.0_216, 0.0_170, 0.1_589, -0.3_120, 0.1_005, -0.0_581, -0.1_458]],
[8, 0.89, [-0.1_758, -0.0_169, 0.1_004, -0.1_411, 0.1_312, 0.1_103, -0.1_996, 0.2_139]],
[3, 1000, [0.1_214, 0.0_352, -0.0_731, -0.1_562, -0.0_994, -0.0_906, -0.2_340, -0.0_539]],
# fmt: on
] )
def snake_case_ ( self : Any , __snake_case : Tuple , __snake_case : Any , __snake_case : List[str] ) -> Any:
_a , _a : Optional[int] = self.get_unet_model(model_id='''stabilityai/stable-diffusion-2''' , fpaa=__snake_case )
_a : Union[str, Any] = self.get_latents(__snake_case , shape=(4, 4, 96, 96) , fpaa=__snake_case )
_a : Optional[Any] = self.get_encoder_hidden_states(__snake_case , shape=(4, 77, 1024) , fpaa=__snake_case )
_a : List[str] = model.apply(
{'''params''': params} , __snake_case , jnp.array(__snake_case , dtype=jnp.intaa ) , encoder_hidden_states=__snake_case , ).sample
assert sample.shape == latents.shape
_a : Any = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten()) ) , dtype=jnp.floataa )
_a : Any = jnp.array(__snake_case , dtype=jnp.floataa )
# Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
assert jnp.allclose(__snake_case , __snake_case , atol=1E-2 )
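# Sketch of the fixture pattern above, assuming network access and the
# public CompVis/stable-diffusion-v1-4 bf16 weights: load the Flax UNet,
# run one denoising step on random inputs, and check the output shape.
import jax
import jax.numpy as jnp

from diffusers import FlaxUNet2DConditionModel

model, params = FlaxUNet2DConditionModel.from_pretrained(
    "CompVis/stable-diffusion-v1-4", subfolder="unet", dtype=jnp.bfloat16, revision="bf16"
)
rng = jax.random.PRNGKey(0)
latents = jax.random.normal(rng, (4, 4, 64, 64), dtype=jnp.bfloat16)
encoder_hidden_states = jax.random.normal(rng, (4, 77, 768), dtype=jnp.bfloat16)
sample = model.apply(
    {"params": params},
    latents,
    jnp.array(999, dtype=jnp.int32),
    encoder_hidden_states=encoder_hidden_states,
).sample
assert sample.shape == latents.shape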
def a__ (__lowercase :str , __lowercase :str ) -> bool:
_A : Dict = len(__lowercase ) + 1
_A : Optional[int] = len(__lowercase ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_A : Optional[Any] = [[0 for i in range(__lowercase )] for j in range(__lowercase )]
    # since a zero-length string matches a zero-length pattern
_A : int = 1
    # since a pattern of zero length will never match a non-empty string
for i in range(1 , __lowercase ):
_A : List[Any] = 0
    # a zero-length string can still match a pattern in which every
    # character is followed by '*' (e.g. "a*b*")
for j in range(1 , __lowercase ):
_A : List[str] = dp[0][j - 2] if pattern[j - 1] == '''*''' else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , __lowercase ):
for j in range(1 , __lowercase ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_A : Tuple = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_A : List[str] = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_A : List[Any] = dp[i - 1][j]
else:
_A : Optional[int] = 0
else:
_A : Optional[Any] = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
_UpperCamelCase : Union[str, Any] ='aab'
_UpperCamelCase : Any ='c*a*b'
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(f'''{input_string} matches the given pattern {pattern}''')
else:
print(f'''{input_string} does not match with the given pattern {pattern}''')
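# A readable restatement of the DP above with boolean entries and real
# variable names; same recurrence, same '.'/'*' semantics.
def match_pattern_readable(input_string: str, pattern: str) -> bool:
    m, n = len(input_string) + 1, len(pattern) + 1
    # dp[i][j]: does input_string[:i] match pattern[:j]?
    dp = [[False] * n for _ in range(m)]
    dp[0][0] = True  # empty string matches empty pattern
    # an empty string can still match patterns like "a*b*" via '*'
    for j in range(2, n):
        dp[0][j] = dp[0][j - 2] and pattern[j - 1] == "*"
    for i in range(1, m):
        for j in range(1, n):
            if pattern[j - 1] in (input_string[i - 1], "."):
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                # zero occurrences of the starred element, or one more of it
                dp[i][j] = dp[i][j - 2] or (
                    pattern[j - 2] in (input_string[i - 1], ".") and dp[i - 1][j]
                )
    return dp[-1][-1]

assert match_pattern_readable("aab", "c*a*b")
assert not match_pattern_readable("aab", "c*a")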
import json
import multiprocessing
import os
import re
from collections import defaultdict
import torch
from accelerate import Accelerator
from accelerate.utils import set_seed
from arguments import HumanEvalArguments
from datasets import load_dataset, load_metric
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from tqdm import tqdm
import transformers
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList
_UpperCamelCase : Union[str, Any] =['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif']
class UpperCAmelCase__ ( __snake_case ):
def __init__( self ,A__ ,A__ ,A__=None ,A__=1 ):
_A : str = tokenizer
_A : Dict = dataset
_A : int = len(A__ ) if n_tasks is None else n_tasks
_A : List[Any] = n_copies
def __iter__( self ):
_A : Dict = []
for task in range(self.n_tasks ):
            # without strip, the model generates commented code ...
prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() )
_A : Any = self.tokenizer(A__ ,padding=A__ ,return_tensors='''pt''' )
for task in range(self.n_tasks ):
for _ in range(self.n_copies ):
yield {
"ids": outputs.input_ids[task],
"task_id": task,
"input_len": outputs.attention_mask[task].sum(),
}
class UpperCAmelCase__ ( __snake_case ):
def __init__( self ,A__ ,A__ ,A__ ):
_A : Optional[Any] = start_length
_A : int = eof_strings
_A : int = tokenizer
def __call__( self ,A__ ,A__ ,**A__ ):
_A : Union[str, Any] = self.tokenizer.batch_decode(input_ids[:, self.start_length :] )
_A : Any = []
for decoded_generation in decoded_generations:
done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) )
return all(A__ )
def a__ (__lowercase :Optional[Any] ) -> List[Any]:
_A : str = re.split('''(%s)''' % '''|'''.join(__lowercase ) , __lowercase )
# last string should be ""
return "".join(string_list[:-2] )
def a__ (__lowercase :List[str] , __lowercase :Dict , __lowercase :List[str] , __lowercase :Optional[int] , __lowercase :List[Any] , __lowercase :str=20 , **__lowercase :List[str] ) -> Optional[Any]:
_A : Any = defaultdict(__lowercase ) # dict of list of generated tokens
for step, batch in tqdm(enumerate(__lowercase ) ):
with torch.no_grad():
_A : int = batch['''ids'''].shape[-1]
_A : Any = accelerator.unwrap_model(__lowercase ).generate(
input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=__lowercase , **__lowercase )
# each task is generated batch_size times
_A : Union[str, Any] = batch['''task_id'''].repeat(__lowercase )
_A : int = accelerator.pad_across_processes(
__lowercase , dim=1 , pad_index=tokenizer.pad_token_id )
_A , _A : List[str] = accelerator.gather((generated_tokens, generated_tasks) )
_A : List[str] = generated_tokens.cpu().numpy()
_A : List[str] = generated_tasks.cpu().numpy()
for task, generated_tokens in zip(__lowercase , __lowercase ):
gen_token_dict[task].append(__lowercase )
_A : Tuple = [[] for _ in range(__lowercase )]
for task, generated_tokens in gen_token_dict.items():
for s in generated_tokens:
_A : Dict = tokenizer.decode(__lowercase , skip_special_tokens=__lowercase , clean_up_tokenization_spaces=__lowercase )
code_gens[task].append(remove_last_block(__lowercase ) )
return code_gens
def a__ () -> Dict:
# Setup configuration
_A : Any = HfArgumentParser(__lowercase )
_A : Union[str, Any] = parser.parse_args()
transformers.logging.set_verbosity_error()
# enables code execution in code_eval metric
_A : Tuple = args.HF_ALLOW_CODE_EVAL
# make sure tokenizer plays nice with multiprocessing
_A : List[Any] = '''false'''
if args.num_workers is None:
_A : List[Any] = multiprocessing.cpu_count()
# Use dataset load to feed to accelerate
_A : List[str] = Accelerator()
set_seed(args.seed , device_specific=__lowercase )
# Load model and tokenizer
_A : Optional[Any] = AutoTokenizer.from_pretrained(args.model_ckpt )
_A : int = tokenizer.eos_token
_A : Any = AutoModelForCausalLM.from_pretrained(args.model_ckpt )
# Generation settings
_A : int = {
'''do_sample''': args.do_sample,
'''temperature''': args.temperature,
'''max_new_tokens''': args.max_new_tokens,
'''top_p''': args.top_p,
'''top_k''': args.top_k,
'''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , __lowercase , __lowercase )] ),
}
# Load evaluation dataset and metric
_A : str = load_dataset('''openai_humaneval''' )
_A : List[Any] = load_metric('''code_eval''' )
_A : Optional[Any] = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] )
_A : Tuple = args.n_samples // args.batch_size
_A : List[Any] = TokenizedDataset(__lowercase , human_eval['''test'''] , n_copies=__lowercase , n_tasks=__lowercase )
    # do not confuse this with args.batch_size, which is actually num_return_sequences
_A : Tuple = DataLoader(__lowercase , batch_size=1 )
# Run a quick test to see if code evaluation is enabled
try:
_A : List[str] = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] )
except ValueError as exception:
print(
'''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`'''
''' flag to enable code evaluation.''' )
raise exception
_A , _A : Optional[Any] = accelerator.prepare(__lowercase , __lowercase )
_A : Tuple = complete_code(
__lowercase , __lowercase , __lowercase , __lowercase , n_tasks=__lowercase , batch_size=args.batch_size , **__lowercase , )
if accelerator.is_main_process:
_A : int = []
for task in tqdm(range(__lowercase ) ):
_A : List[str] = human_eval['''test'''][task]['''test''']
_A : int = f"""check({human_eval['test'][task]['entry_point']})"""
references.append('''\n''' + test_func + '''\n''' + entry_point )
# Evaluate completions with "code_eval" metric
_A , _A : Union[str, Any] = code_eval_metric.compute(
references=__lowercase , predictions=__lowercase , num_workers=args.num_workers )
print(f"""Results: {pass_at_k}""" )
# Save results to json file
with open(args.output_file , '''w''' ) as fp:
json.dump(__lowercase , __lowercase )
# For some reason the following seems to be necessary sometimes for code_eval to work nicely with multiprocessing
# https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script
if __name__ == "__main__":
main()
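# Readable sketch of the post-processing step above: split a completion on
# the EOF markers (keeping the separators via the capture group) and drop
# the last marker plus everything after it, as remove_last_block does.
# (Returns '' when no marker is present, matching the function above.)
import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block_readable(code: str) -> str:
    parts = re.split("(%s)" % "|".join(EOF_STRINGS), code)
    return "".join(parts[:-2])

completion = "    return a + b\n\nprint(add(1, 2))"
print(repr(remove_last_block_readable(completion)))  # '    return a + b\n'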
"""simple docstring"""
import io
import os
import unicodedata
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase : Dict = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = """▁"""
_lowerCAmelCase : int = {"""vocab_file""": """vocab.txt""", """sentencepiece_model_ckpt""": """sentencepiece.bpe.model"""}
_lowerCAmelCase : Any = {
"""sentencepiece_model_file""": """sentencepiece.bpe.model""",
"""vocab_file""": """vocab.txt""",
}
_lowerCAmelCase : List[Any] = {
"""vocab_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt""",
},
"""sentencepiece_model_file""": {
"""ernie-m-base""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
"""ernie-m-large""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model""",
},
}
_lowerCAmelCase : Any = {
"""ernie-m-base""": 514,
"""ernie-m-large""": 514,
}
_lowerCAmelCase : List[Any] = {
"""ernie-m-base""": {"""do_lower_case""": False},
"""ernie-m-large""": {"""do_lower_case""": False},
}
class lowerCAmelCase__ ( __magic_name__ ):
SCREAMING_SNAKE_CASE_ =["input_ids"]
SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ =PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ =RESOURCE_FILES_NAMES
def __init__( self : int , snake_case__ : str , snake_case__ : Tuple=None , snake_case__ : Any=False , snake_case__ : Union[str, Any]="utf8" , snake_case__ : Dict="[UNK]" , snake_case__ : Tuple="[SEP]" , snake_case__ : List[str]="[PAD]" , snake_case__ : List[str]="[CLS]" , snake_case__ : Union[str, Any]="[MASK]" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : List[Any] , ):
'''simple docstring'''
        # Mask token behaves like a normal word, i.e. it includes the space before it and
        # is included in the raw text; there should be a match in a non-normalized sentence.
UpperCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , vocab_file=snake_case__ , encoding=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , )
UpperCAmelCase__ : Tuple = do_lower_case
UpperCAmelCase__ : List[str] = sentencepiece_model_ckpt
UpperCAmelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(snake_case__ )
# to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning
if vocab_file is not None:
UpperCAmelCase__ : Optional[Any] = self.load_vocab(filepath=snake_case__ )
else:
UpperCAmelCase__ : int = {self.sp_model.id_to_piece(snake_case__ ): id for id in range(self.sp_model.get_piece_size() )}
UpperCAmelCase__ : str = {v: k for k, v in self.vocab.items()}
def __a ( self : List[Any] , snake_case__ : Dict ):
'''simple docstring'''
if text is None:
return None
UpperCAmelCase__ : Optional[Any] = self.tokenize(snake_case__ )
UpperCAmelCase__ , UpperCAmelCase__ : Tuple = "", []
for i, ch in enumerate(snake_case__ ):
if ch in self.SP_CHAR_MAPPING:
UpperCAmelCase__ : Any = self.SP_CHAR_MAPPING.get(snake_case__ )
else:
UpperCAmelCase__ : str = unicodedata.normalize("NFKC" , snake_case__ )
if self.is_whitespace(snake_case__ ):
continue
normalized_text += ch
char_mapping.extend([i] * len(snake_case__ ) )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = normalized_text, [], 0
if self.do_lower_case:
UpperCAmelCase__ : Tuple = text.lower()
for token in split_tokens:
if token[:1] == "▁":
UpperCAmelCase__ : str = token[1:]
UpperCAmelCase__ : Union[str, Any] = text[offset:].index(snake_case__ ) + offset
UpperCAmelCase__ : int = start + len(snake_case__ )
token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) )
UpperCAmelCase__ : Union[str, Any] = end
return token_mapping
@property
def __a ( self : Any ):
'''simple docstring'''
return len(self.vocab )
def __a ( self : Union[str, Any] ):
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def __getstate__( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.__dict__.copy()
UpperCAmelCase__ : List[str] = None
return state
def __setstate__( self : Union[str, Any] , snake_case__ : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
UpperCAmelCase__ : Dict = {}
UpperCAmelCase__ : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.sentencepiece_model_ckpt )
def __a ( self : List[str] , snake_case__ : Dict ):
'''simple docstring'''
return "".join((self.SP_CHAR_MAPPING.get(snake_case__ , snake_case__ ) for c in text) )
def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str=False , snake_case__ : str=6_4 , snake_case__ : str=0.1 ):
'''simple docstring'''
if self.sp_model_kwargs.get("enable_sampling" ) is True:
UpperCAmelCase__ : int = True
if self.sp_model_kwargs.get("alpha" ) is not None:
UpperCAmelCase__ : Optional[int] = self.sp_model_kwargs.get("alpha" )
if self.sp_model_kwargs.get("nbest_size" ) is not None:
UpperCAmelCase__ : List[Any] = self.sp_model_kwargs.get("nbest_size" )
if not enable_sampling:
UpperCAmelCase__ : Union[str, Any] = self.sp_model.EncodeAsPieces(snake_case__ )
else:
UpperCAmelCase__ : List[str] = self.sp_model.SampleEncodeAsPieces(snake_case__ , snake_case__ , snake_case__ )
UpperCAmelCase__ : Any = []
for pi, piece in enumerate(snake_case__ ):
if piece == SPIECE_UNDERLINE:
if not pieces[pi + 1].startswith(snake_case__ ) and pi != 0:
new_pieces.append(snake_case__ )
continue
else:
continue
UpperCAmelCase__ : List[Any] = 0
for i, chunk in enumerate(snake_case__ ):
if chunk == SPIECE_UNDERLINE:
continue
if self.is_ch_char(snake_case__ ) or self.is_punct(snake_case__ ):
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
new_pieces.append(snake_case__ )
UpperCAmelCase__ : str = i + 1
elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase__ : int = i
elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit():
if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE:
new_pieces.append(piece[lst_i:i] )
UpperCAmelCase__ : Optional[int] = i
if len(snake_case__ ) > lst_i:
new_pieces.append(piece[lst_i:] )
return new_pieces
def __a ( self : List[str] , snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def __a ( self : Tuple , snake_case__ : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = self.convert_ids_to_tokens(snake_case__ )
UpperCAmelCase__ : Any = "".join(snake_case__ ).replace(snake_case__ , " " ).strip()
return out_string
def __a ( self : Tuple , snake_case__ : List[Any] ):
'''simple docstring'''
return self.vocab.get(snake_case__ , self.vocab.get(self.unk_token ) )
def __a ( self : int , snake_case__ : List[Any] ):
'''simple docstring'''
return self.reverse_vocab.get(snake_case__ , self.unk_token )
def __a ( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Dict=None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase__ : Optional[Any] = [self.cls_token_id]
UpperCAmelCase__ : str = [self.sep_token_id]
return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep
def __a ( self : int , snake_case__ : List[str] , snake_case__ : str=None ):
'''simple docstring'''
if offset_mapping_a is None:
return [(0, 0)] + offset_mapping_a + [(0, 0)]
return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)]
def __a ( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : List[str]=None , snake_case__ : Union[str, Any]=False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"You should not supply a second sequence if the provided sequence of "
"ids is already formatted with special tokens for the model." )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1]
return [1] + ([0] * len(snake_case__ )) + [1]
def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ):
'''simple docstring'''
# called when `add_special_tokens` is True, so align with `build_inputs_with_special_tokens` method
if token_ids_a is None:
# [CLS] X [SEP]
return (len(snake_case__ ) + 2) * [0]
# [CLS] A [SEP] [SEP] B [SEP]
return [0] * (len(snake_case__ ) + 1) + [1] * (len(snake_case__ ) + 3)
def __a ( self : Optional[Any] , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if "\u4e00" <= char <= "\u9fff":
return True
return False
def __a ( self : Dict , snake_case__ : Optional[int] ):
'''simple docstring'''
if ("a" <= char <= "z") or ("A" <= char <= "Z"):
return True
return False
def __a ( self : str , snake_case__ : Union[str, Any] ):
'''simple docstring'''
if char in ",;:.?!~,;:。?!《》【】":
return True
return False
def __a ( self : Dict , snake_case__ : Any ):
'''simple docstring'''
if char == " " or char == "\t" or char == "\n" or char == "\r":
return True
if len(snake_case__ ) == 1:
UpperCAmelCase__ : str = unicodedata.category(snake_case__ )
if cat == "Zs":
return True
return False
def __a ( self : Union[str, Any] , snake_case__ : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = {}
with io.open(snake_case__ , "r" , encoding="utf-8" ) as f:
for index, line in enumerate(snake_case__ ):
UpperCAmelCase__ : str = line.rstrip("\n" )
UpperCAmelCase__ : Dict = int(snake_case__ )
return token_to_idx
def __a ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Optional[str] = None ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = 0
if os.path.isdir(snake_case__ ):
UpperCAmelCase__ : str = os.path.join(
snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
UpperCAmelCase__ : Any = (filename_prefix + "-" if filename_prefix else "") + save_directory
with open(snake_case__ , "w" , encoding="utf-8" ) as writer:
            for token, token_index in sorted(self.vocab.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase__ : str = token_index
writer.write(token + "\n" )
index += 1
UpperCAmelCase__ : Union[str, Any] = os.path.join(snake_case__ , "sentencepiece.bpe.model" )
with open(snake_case__ , "wb" ) as fi:
UpperCAmelCase__ : str = self.sp_model.serialized_model_proto()
fi.write(snake_case__ )
return (vocab_file,)
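# Illustrative sketch of the token-to-character offset mapping idea the
# tokenizer above implements: strip the SentencePiece underline, locate each
# piece in the text, and record (start, end) character spans. Simplified for
# illustration; the real method also normalizes and lower-cases the text.
from typing import List, Tuple

SP_UNDERLINE = "▁"

def char_offsets(text: str, tokens: List[str]) -> List[Tuple[int, int]]:
    spans, offset = [], 0
    for token in tokens:
        piece = token.lstrip(SP_UNDERLINE)
        start = text.index(piece, offset)
        end = start + len(piece)
        spans.append((start, end))
        offset = end
    return spans

print(char_offsets("hello world", ["▁hello", "▁world"]))  # [(0, 5), (6, 11)]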
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_lowerCAmelCase : Optional[int] = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
_lowerCAmelCase : Optional[Any] = {
"""169M""": 768,
"""430M""": 1_024,
"""1B5""": 2_048,
"""3B""": 2_560,
"""7B""": 4_096,
"""14B""": 5_120,
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> Tuple:
'''simple docstring'''
UpperCAmelCase__ : str = list(state_dict.keys() )
for name in state_dict_keys:
UpperCAmelCase__ : Dict = state_dict.pop(snake_case )
# emb -> embedding
if name.startswith("emb." ):
UpperCAmelCase__ : str = name.replace("emb." , "embeddings." )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("blocks.0.ln0" ):
UpperCAmelCase__ : List[str] = name.replace("blocks.0.ln0" , "blocks.0.pre_ln" )
# att -> attention
UpperCAmelCase__ : Optional[int] = re.sub(r"blocks\.(\d+)\.att" , r"blocks.\1.attention" , snake_case )
# ffn -> feed_forward
UpperCAmelCase__ : Any = re.sub(r"blocks\.(\d+)\.ffn" , r"blocks.\1.feed_forward" , snake_case )
# time_mix_k -> time_mix_key and reshape
if name.endswith(".time_mix_k" ):
UpperCAmelCase__ : List[Any] = name.replace(".time_mix_k" , ".time_mix_key" )
# time_mix_v -> time_mix_value and reshape
if name.endswith(".time_mix_v" ):
UpperCAmelCase__ : int = name.replace(".time_mix_v" , ".time_mix_value" )
        # time_mix_r -> time_mix_receptance and reshape
if name.endswith(".time_mix_r" ):
UpperCAmelCase__ : Optional[Any] = name.replace(".time_mix_r" , ".time_mix_receptance" )
if name != "head.weight":
UpperCAmelCase__ : int = "rwkv." + name
UpperCAmelCase__ : Dict = weight
return state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Dict=None , snake_case : List[Any]=None , snake_case : List[str]=False , snake_case : str=None )-> int:
'''simple docstring'''
if tokenizer_file is None:
print("No `--tokenizer_file` provided, we will use the default tokenizer." )
UpperCAmelCase__ : str = 5_0277
UpperCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b" )
else:
UpperCAmelCase__ : Union[str, Any] = PreTrainedTokenizerFast(tokenizer_file=snake_case )
UpperCAmelCase__ : Tuple = len(snake_case )
tokenizer.save_pretrained(snake_case )
# 2. Build the config
UpperCAmelCase__ : Dict = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
UpperCAmelCase__ : List[Any] = candidate
break
if size is None:
raise ValueError("Could not infer the size, please provide it with the `--size` argument." )
if size not in possible_sizes:
raise ValueError(f'`size` should be one of {possible_sizes}, got {size}.' )
UpperCAmelCase__ : int = RwkvConfig(
vocab_size=snake_case , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(snake_case )
# 3. Download model file then convert state_dict
UpperCAmelCase__ : Optional[int] = hf_hub_download(snake_case , snake_case )
UpperCAmelCase__ : Optional[Any] = torch.load(snake_case , map_location="cpu" )
UpperCAmelCase__ : Tuple = convert_state_dict(snake_case )
# 4. Split in shards and save
UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = shard_checkpoint(snake_case )
for shard_file, shard in shards.items():
torch.save(snake_case , os.path.join(snake_case , snake_case ) )
if index is not None:
UpperCAmelCase__ : Tuple = os.path.join(snake_case , snake_case )
# Save the index as well
with open(snake_case , "w" , encoding="utf-8" ) as f:
UpperCAmelCase__ : Any = json.dumps(snake_case , indent=2 , sort_keys=snake_case ) + "\n"
f.write(snake_case )
    # 5. Clean up shards (for some reason the files PyTorch saves take as much space as the whole state_dict)
print(
"Cleaning up shards. This may error with an OOM error, it this is the case don't worry you still have converted the model." )
UpperCAmelCase__ : List[str] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
UpperCAmelCase__ : Any = torch.load(os.path.join(snake_case , snake_case ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(snake_case , snake_case ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("Please provide a `model_name` to push the model to the Hub." )
UpperCAmelCase__ : Optional[Any] = AutoModelForCausalLM.from_pretrained(snake_case )
model.push_to_hub(snake_case , max_shard_size="2GB" )
tokenizer.push_to_hub(snake_case )
if __name__ == "__main__":
_lowerCAmelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
_lowerCAmelCase : Dict = parser.parse_args()
convert_rmkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
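# Minimal, verifiable sketch of the key-renaming rules in convert_state_dict
# above, applied to a single key.
import re

def rename_rwkv_key(name: str) -> str:
    if name.startswith("emb."):
        name = name.replace("emb.", "embeddings.")
    if name.startswith("blocks.0.ln0"):
        name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
    name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
    name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
    for old, new in [(".time_mix_k", ".time_mix_key"),
                     (".time_mix_v", ".time_mix_value"),
                     (".time_mix_r", ".time_mix_receptance")]:
        if name.endswith(old):
            name = name.replace(old, new)
    if name != "head.weight":
        name = "rwkv." + name
    return name

print(rename_rwkv_key("blocks.3.att.time_mix_k"))  # rwkv.blocks.3.attention.time_mix_key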
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
def __init__( self : Dict , __snake_case : List[Any] , __snake_case : Any=13 , __snake_case : Optional[Any]=30 , __snake_case : int=2 , __snake_case : Optional[int]=3 , __snake_case : Dict=True , __snake_case : str=True , __snake_case : Any=32 , __snake_case : List[Any]=2 , __snake_case : List[str]=4 , __snake_case : Any=37 , __snake_case : Optional[Any]="gelu" , __snake_case : List[str]=0.1 , __snake_case : List[str]=0.1 , __snake_case : Optional[int]=10 , __snake_case : int=0.02 , __snake_case : Dict=3 , __snake_case : Optional[int]=None , ) -> Tuple:
_a : Any = parent
_a : List[str] = batch_size
_a : Optional[int] = image_size
_a : Optional[Any] = patch_size
_a : int = num_channels
_a : Any = is_training
_a : Any = use_labels
_a : int = hidden_size
_a : Tuple = num_hidden_layers
_a : Tuple = num_attention_heads
_a : Optional[Any] = intermediate_size
_a : Tuple = hidden_act
_a : Dict = hidden_dropout_prob
_a : Optional[int] = attention_probs_dropout_prob
_a : List[str] = type_sequence_label_size
_a : Tuple = initializer_range
_a : Tuple = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_a : List[str] = (image_size // patch_size) ** 2
_a : str = num_patches + 1
def snake_case_ ( self : Optional[int] ) -> str:
_a : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : List[str] = None
if self.use_labels:
_a : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a : Optional[int] = self.get_config()
return config, pixel_values, labels
def snake_case_ ( self : Optional[int] ) -> Tuple:
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , )
def snake_case_ ( self : int , __snake_case : str , __snake_case : Any , __snake_case : Tuple ) -> Dict:
_a : Optional[Any] = TFViTModel(config=__snake_case )
_a : Tuple = model(__snake_case , training=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
_a : Dict = self.image_size // 2
_a : Tuple = pixel_values[:, :, :image_size, :image_size]
_a : int = model(__snake_case , interpolate_pos_encoding=__snake_case , training=__snake_case )
_a : List[str] = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def snake_case_ ( self : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Dict ) -> Any:
_a : int = self.type_sequence_label_size
_a : str = TFViTForImageClassification(__snake_case )
_a : List[Any] = model(__snake_case , labels=__snake_case , training=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
_a : str = self.image_size // 2
_a : Dict = pixel_values[:, :, :image_size, :image_size]
_a : int = model(__snake_case , interpolate_pos_encoding=__snake_case , training=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a : Any = 1
_a : Dict = TFViTForImageClassification(__snake_case )
_a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a : Dict = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def snake_case_ ( self : str ) -> Any:
_a : int = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
UpperCAmelCase : Optional[Any] = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
UpperCAmelCase : Dict = (
{'feature-extraction': TFViTModel, 'image-classification': TFViTForImageClassification}
if is_tf_available()
else {}
)
UpperCAmelCase : int = False
UpperCAmelCase : int = False
UpperCAmelCase : List[Any] = False
def snake_case_ ( self : Optional[int] ) -> Optional[Any]:
_a : List[str] = TFViTModelTester(self )
_a : List[Any] = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def snake_case_ ( self : Optional[Any] ) -> int:
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def snake_case_ ( self : Any ) -> List[Any]:
pass
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def snake_case_ ( self : Any ) -> Any:
pass
def snake_case_ ( self : int ) -> Optional[int]:
_a , _a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Dict = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_a : Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , tf.keras.layers.Layer ) )
def snake_case_ ( self : Optional[Any] ) -> Tuple:
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : Tuple = model_class(__snake_case )
_a : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Union[str, Any] = [*signature.parameters.keys()]
_a : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , __snake_case )
def snake_case_ ( self : int ) -> Union[str, Any]:
_a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def snake_case_ ( self : Tuple ) -> Any:
_a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
@slow
def snake_case_ ( self : List[Any] ) -> List[Any]:
_a : Tuple = TFViTModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(__snake_case )
def lowerCamelCase_ ( ):
_a : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase ( unittest.TestCase ):
@cached_property
def snake_case_ ( self : Dict ) -> int:
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def snake_case_ ( self : Optional[Any] ) -> List[str]:
_a : int = TFViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' )
_a : Tuple = self.default_image_processor
_a : List[Any] = prepare_img()
_a : int = image_processor(images=__snake_case , return_tensors='''tf''' )
# forward pass
_a : Dict = model(**__snake_case )
# verify the logits
_a : Dict = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape , __snake_case )
_a : Optional[Any] = tf.constant([-0.2_744, 0.8_215, -0.0_836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __snake_case , atol=1E-4 )
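# End-to-end sketch of the slow integration test above: classify one image
# with the public google/vit-base-patch16-224 checkpoint. Assumes network
# access for the weights and that the COCO fixture image exists locally.
import tensorflow as tf
from PIL import Image

from transformers import TFViTForImageClassification, ViTImageProcessor

model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="tf")
logits = model(**inputs).logits  # shape (1, 1000)
pred = int(tf.math.argmax(logits, axis=-1)[0])
print(model.config.id2label[pred])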
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def lowerCamelCase_ ( UpperCamelCase_ = 8 ):
_a : int = ascii_letters + digits + punctuation
return "".join(secrets.choice(UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) )
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
    # Alternative password generator: combine the required characters
    # with random letters, numbers, and special characters
# Put your code here...
i -= len(UpperCamelCase_ )
_a : Dict = i // 3
_a : Union[str, Any] = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
_a : Optional[Any] = (
chars_incl
+ random(UpperCamelCase_ , quotient + remainder )
+ random(UpperCamelCase_ , UpperCamelCase_ )
+ random(UpperCamelCase_ , UpperCamelCase_ )
)
_a : List[str] = list(UpperCamelCase_ )
shuffle(UpperCamelCase_ )
return "".join(UpperCamelCase_ )
# random is a generalised function for letters, characters and numbers
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
return "".join(secrets.choice(UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) )
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
pass # Put your code here...
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
pass # Put your code here...
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ ):
pass # Put your code here...
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ = 8 ):
if len(UpperCamelCase_ ) < min_length:
# Your Password must be at least 8 characters long
return False
_a : List[str] = any(char in ascii_uppercase for char in password )
_a : Optional[Any] = any(char in ascii_lowercase for char in password )
_a : int = any(char in digits for char in password )
_a : Optional[Any] = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def lowerCamelCase_ ( ):
_a : Any = int(input('''Please indicate the max length of your password: ''' ).strip() )
_a : Any = input(
'''Please indicate the characters that must be in your password: ''' ).strip()
print('''Password generated:''' , password_generator(UpperCamelCase_ ) )
print(
'''Alternative Password generated:''' , alternative_password_generator(UpperCamelCase_ , UpperCamelCase_ ) , )
    print('''[If you are thinking of using this password, you had better save it.]''' )
if __name__ == "__main__":
main()
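# A de-obfuscated sketch of the strength check above with explicit names;
# same rule: minimum length plus at least one character from each class.
from string import ascii_lowercase, ascii_uppercase, digits, punctuation

def is_strong_password(password: str, min_length: int = 8) -> bool:
    if len(password) < min_length:
        return False
    return (
        any(c in ascii_uppercase for c in password)
        and any(c in ascii_lowercase for c in password)
        and any(c in digits for c in password)
        and any(c in punctuation for c in password)
    )

assert is_strong_password("Hunter2!xyz")
assert not is_strong_password("hunter2")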
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=4 , ):
"""simple docstring"""
snake_case : Dict = parent
snake_case : Union[str, Any] = batch_size
snake_case : Union[str, Any] = seq_length
snake_case : int = is_training
snake_case : int = use_attention_mask
snake_case : Tuple = use_token_type_ids
snake_case : Tuple = use_labels
snake_case : str = vocab_size
snake_case : Any = hidden_size
snake_case : str = num_hidden_layers
snake_case : int = num_attention_heads
snake_case : Optional[int] = intermediate_size
snake_case : Tuple = hidden_act
snake_case : Dict = hidden_dropout_prob
snake_case : List[Any] = attention_probs_dropout_prob
snake_case : List[str] = max_position_embeddings
snake_case : Any = type_vocab_size
snake_case : Tuple = type_sequence_label_size
snake_case : Dict = initializer_range
snake_case : str = num_choices
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case : Union[str, Any] = None
if self.use_attention_mask:
snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case : List[str] = None
if self.use_token_type_ids:
snake_case : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case : Any = RobertaPreLayerNormConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = self.prepare_config_and_inputs()
snake_case : Optional[Any] = config_and_inputs
snake_case : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = self.prepare_config_and_inputs()
snake_case : List[str] = config_and_inputs
snake_case : Any = True
snake_case : Dict = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class lowerCamelCase__ ( lowercase__ , unittest.TestCase ):
a__ : List[str] = True
a__ : Optional[Any] = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Union[str, Any] = FlaxRobertaPreLayerNormModelTester(self )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
for model_class_name in self.all_model_classes:
snake_case : Dict = model_class_name.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase_ )
snake_case : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCamelCase_ )
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : int = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase_ )
snake_case : Any = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
snake_case : Union[str, Any] = model(UpperCamelCase_ )[0]
snake_case : int = [1, 11, 50_265]
self.assertEqual(list(output.shape ) , UpperCamelCase_ )
# compare the actual values for a slice.
snake_case : Union[str, Any] = np.array(
[[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40" , from_pt=UpperCamelCase_ )
snake_case : List[str] = np.array([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] , dtype=jnp.intaa )
snake_case : Dict = model(UpperCamelCase_ )[0]
# compare the actual values for a slice.
snake_case : Optional[Any] = np.array(
[[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]] , dtype=np.floataa )
self.assertTrue(np.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )
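# Sketch of the integration check above: load the PyTorch checkpoint into
# Flax with from_pt=True and inspect the masked-LM logits shape. Downloads
# the andreasmadsen/efficient_mlm_m0.40 weights from the Hub.
import numpy as np

from transformers import FlaxRobertaPreLayerNormForMaskedLM

model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained(
    "andreasmadsen/efficient_mlm_m0.40", from_pt=True
)
input_ids = np.array([[0, 31_414, 232, 2]], dtype=np.int32)
logits = model(input_ids)[0]
print(logits.shape)  # (1, 4, 50265)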
import os
import textwrap
import pyarrow as pa
import pytest
from datasets import ClassLabel, Features, Image
from datasets.packaged_modules.csv.csv import Csv
from ..utils import require_pil
@pytest.fixture
def UpperCamelCase ( _a ) -> List[str]:
'''simple docstring'''
lowercase_ :Union[str, Any] = tmp_path / '''file.csv'''
lowercase_ :List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a ) -> Optional[Any]:
'''simple docstring'''
lowercase_ :List[Any] = tmp_path / '''malformed_file.csv'''
lowercase_ :List[str] = textwrap.dedent(
'''\
header1,header2
1,2
10,20,
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a , _a ) -> Optional[Any]:
'''simple docstring'''
lowercase_ :Dict = tmp_path / '''csv_with_image.csv'''
lowercase_ :int = textwrap.dedent(
f"\\n image\n {image_file}\n " )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a ) -> str:
'''simple docstring'''
lowercase_ :int = tmp_path / '''csv_with_label.csv'''
lowercase_ :int = textwrap.dedent(
'''\
label
good
bad
good
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
@pytest.fixture
def UpperCamelCase ( _a ) -> str:
'''simple docstring'''
lowercase_ :Dict = tmp_path / '''csv_with_int_list.csv'''
lowercase_ :Any = textwrap.dedent(
'''\
int_list
1 2 3
4 5 6
7 8 9
''' )
with open(_a , '''w''' ) as f:
f.write(_a )
return str(_a )
def UpperCamelCase ( _a , _a , _a ) -> Any:
'''simple docstring'''
lowercase_ :Dict = Csv()
lowercase_ :Tuple = csv._generate_tables([[csv_file, malformed_csv_file]] )
with pytest.raises(_a , match='''Error tokenizing data''' ):
for _ in generator:
pass
assert any(
record.levelname == '''ERROR'''
and '''Failed to read file''' in record.message
and os.path.basename(_a ) in record.message
for record in caplog.records )
@require_pil
def UpperCamelCase ( _a ) -> Tuple:
'''simple docstring'''
with open(_a , encoding='''utf-8''' ) as f:
lowercase_ :Any = f.read().splitlines()[1]
lowercase_ :List[Any] = Csv(encoding='''utf-8''' , features=Features({'''image''': Image()} ) )
lowercase_ :List[Any] = csv._generate_tables([[csv_file_with_image]] )
lowercase_ :Dict = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''image''' ).type == Image()()
lowercase_ :Optional[int] = pa_table.to_pydict()['''image''']
assert generated_content == [{"path": image_file, "bytes": None}]
def UpperCamelCase ( _a ) -> int:
'''simple docstring'''
with open(_a , encoding='''utf-8''' ) as f:
lowercase_ :List[Any] = f.read().splitlines()[1:]
lowercase_ :Optional[Any] = Csv(encoding='''utf-8''' , features=Features({'''label''': ClassLabel(names=['''good''', '''bad'''] )} ) )
lowercase_ :List[Any] = csv._generate_tables([[csv_file_with_label]] )
lowercase_ :Optional[int] = pa.concat_tables([table for _, table in generator] )
assert pa_table.schema.field('''label''' ).type == ClassLabel(names=['''good''', '''bad'''] )()
lowercase_ :int = pa_table.to_pydict()['''label''']
    assert generated_content == [ClassLabel(names=['''good''', '''bad'''] ).str2int(label ) for label in labels]
def UpperCamelCase ( _a ) -> Union[str, Any]:
'''simple docstring'''
    lowercase_ :int = Csv(encoding='''utf-8''' , sep=''',''' , converters={'''int_list''': lambda x : [int(i ) for i in x.split()]} )
lowercase_ :Any = csv._generate_tables([[csv_file_with_int_list]] )
lowercase_ :Union[str, Any] = pa.concat_tables([table for _, table in generator] )
assert pa.types.is_list(pa_table.schema.field('''int_list''' ).type )
lowercase_ :List[str] = pa_table.to_pydict()['''int_list''']
assert generated_content == [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
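# Standalone sketch of the converters pattern tested above, using pandas
# directly (the datasets Csv builder forwards these kwargs to
# pandas.read_csv): parse a space-separated column into lists of ints.
import io

import pandas as pd

csv_text = "int_list\n1 2 3\n4 5 6\n"
df = pd.read_csv(
    io.StringIO(csv_text),
    converters={"int_list": lambda x: [int(i) for i in x.split()]},
)
assert df["int_list"].tolist() == [[1, 2, 3], [4, 5, 6]]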
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "encoder_hidden_states": encoder_hidden_states,
            "encoder_attention_mask": encoder_attention_mask,
        }
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)

        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFEsmModel,
            TFEsmForMaskedLM,
            TFEsmForSequenceClassification,
            TFEsmForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFEsmModel,
            "fill-mask": TFEsmForMaskedLM,
            "text-classification": TFEsmForSequenceClassification,
            "token-classification": TFEsmForTokenClassification,
            "zero-shot": TFEsmForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip("Protein models do not support embedding resizing.")
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip("Protein models do not support embedding resizing.")
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[8.921518, -10.589814, -6.4671307],
[-6.3967156, -13.911377, -1.1211915],
[-7.781247, -13.951557, -3.740592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")

        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
[
[
[0.14443092, 0.54125327, 0.3247739],
[0.30340484, 0.00526676, 0.31077722],
[0.32278043, -0.24987096, 0.3414628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
| 27
|
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        encoding = self.tokenizer(text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 27
| 1
|
def bubble_sort(list_data: list, length: int = 0):
    length = length or len(list_data)
    swapped = False
    for i in range(length - 1):
        if list_data[i] > list_data[i + 1]:
            # swap adjacent out-of-order elements in place
            list_data[i], list_data[i + 1] = list_data[i + 1], list_data[i]
            swapped = True

    return list_data if not swapped else bubble_sort(list_data, length - 1)
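# Example (a hedged sketch): bubble_sort([3, 1, 2]) -> [1, 2, 3]. Each recursive
# call scans one element fewer, and recursion stops as soon as a pass makes no swap.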
if __name__ == "__main__":
import doctest
doctest.testmod()
| 53
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class GPTNeoXConfig(PretrainedConfig):
    model_type = "gpt_neox"

    def __init__(self, vocab_size=50_432, hidden_size=6_144, num_hidden_layers=44, num_attention_heads=64, intermediate_size=24_576, hidden_act="gelu", rotary_pct=0.25, rotary_emb_base=10_000, attention_dropout=0.0, hidden_dropout=0.0, classifier_dropout=0.1, max_position_embeddings=2_048, initializer_range=0.02, layer_norm_eps=1e-5, use_cache=True, bos_token_id=0, eos_token_id=2, tie_word_embeddings=False, use_parallel_residual=True, rope_scaling=None, **kwargs):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisible by the number of attention heads! Make sure to update them!" )
    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with two fields, `type` and `factor`, "
                F'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s type field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be a float > 1, got {rope_scaling_factor}''' )
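# Hedged usage sketch (not part of the original file): a config that passes the
# validation above must use a two-field dict, e.g.
#   config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 2.0})
# since `factor` has to be a float strictly greater than 1.0.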
| 525
| 0
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, F"""{key}.dat""")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=state_dict, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
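# Note: `extract_submodules_state_dict` matches whole module-name prefixes, which is
# why "a.1" selects "a.1.a" but not "a.10.a" in the assertions above.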
| 38
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""camembert-base""": """https://huggingface.co/camembert-base/resolve/main/config.json""",
"""umberto-commoncrawl-cased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"""
),
"""umberto-wikipedia-uncased-v1""": (
"""https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"""
),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(self, vocab_size=3_0522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
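# For multiple-choice tasks the export declares a 3-D (batch, choice, sequence)
# layout for each input; every other task uses the 2-D (batch, sequence) axes.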
| 38
| 1
|
'''simple docstring'''
def prime_sieve_eratosthenes(num: int):
    """Sieve of Eratosthenes: return all primes up to and including `num`."""
    if num <= 0:
        raise ValueError("Input must be a positive integer")

    primes = [True] * (num + 1)

    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1

    return [prime for prime in range(2, num + 1) if primes[prime]]
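# Example: prime_sieve_eratosthenes(10) -> [2, 3, 5, 7]. Marking starts at p * p
# because every smaller composite already has a prime factor below p.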
if __name__ == "__main__":
import doctest
doctest.testmod()
_SCREAMING_SNAKE_CASE = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
_SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE = {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(self, vocab_size=50265, max_position_embeddings=512, encoder_layers=8, encoder_ffn_dim=2048, encoder_attention_heads=16, decoder_layers=8, decoder_ffn_dim=2048, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=1, scale_embedding=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, forced_eos_token_id=2, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs,
        )
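# Hedged usage sketch (not in the original file): the defaults above describe the
# 90M checkpoint, so `BlenderbotSmallConfig()` should largely agree with
# `BlenderbotSmallConfig.from_pretrained("facebook/blenderbot_small-90M")` on the
# architecture hyperparameters.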
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ] )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[F'''past_key_values.{i}.key'''] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[F'''past_key_values.{i}.value'''] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ] )

        return common_inputs
    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[F'''present.{i}.key'''] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[F'''present.{i}.value'''] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework )
        decoder_inputs = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework) )
        return common_inputs
    def generate_dummy_inputs(self, tokenizer, batch_size=-1, seq_length=-1, is_pair=False, framework=None) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t )
| 18
| 1
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)

    def get_scheduler_config(self, **kwargs):
        config = {
            'num_train_timesteps': 1_0_0_0,
            'beta_start': 0.00_01,
            'beta_end': 0.02,
            'beta_schedule': 'linear',
            'solver_order': 2,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop('num_inference_steps', None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, 'set_timesteps'):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, 'set_timesteps'):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def test_switch(self):
        # make sure that iterating over schedulers with same config names gives same results
        # for defaults
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config())
        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2_39_16) < 1e-3

        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config)
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config)
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config)
        scheduler = DEISMultistepScheduler.from_config(scheduler.config)

        sample = self.full_loop(scheduler=scheduler)
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2_39_16) < 1e-3
    def test_timesteps(self):
        for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, algorithm_type='deis', solver_order=order, solver_type=solver_type, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_solver_order_and_type(self):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        sample = self.full_loop(
                            solver_order=order, solver_type=solver_type, prediction_type=prediction_type, algorithm_type=algorithm_type, )
                        assert not torch.isnan(sample).any(), "Samples have nan numbers"

    def test_lower_order_final(self):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)

    def test_inference_steps(self):
        for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
            self.check_over_forward(num_inference_steps=num_inference_steps, time_step=0)
    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.2_39_16) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type='v_prediction')
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_mean.item() - 0.0_91) < 1e-3

    def test_fp16_support(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True, dynamic_thresholding_ratio=0)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1_0
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        assert sample.dtype == torch.float16
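# The half-precision loop above relies on DEISMultistepScheduler.step preserving the
# sample dtype end to end; an upcast anywhere in the loop would make the final assert fail.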
| 421
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip('/')
    target_model_path = args.target_model_path

    print(F"""Load fine-pruned model from {model_name_or_path}""")
    model = torch.load(os.path.join(model_name_or_path, 'pytorch_model.bin'))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(F"""Copied layer {name}""")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[F"""{prefix_}mask_scores"""]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(F"""Pruned layer {name}""")
            else:
                raise ValueError('Unknown pruning method')

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), F"""bertarized_{os.path.basename(model_name_or_path)}""")

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(F"""\nCreated folder {target_model_path}""")

    torch.save(pruned_model, os.path.join(target_model_path, 'pytorch_model.bin'))
    print('\nPruned model saved! See you later!')
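# Example invocation (a hedged sketch, assuming this file is saved as bertarize.py):
#   python bertarize.py --pruning_method topK --threshold 0.10 \
#       --model_name_or_path /path/to/fine_pruned_model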
if __name__ == "__main__":
_snake_case : int = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model.'
'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared.'
'Not needed for `l0`'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
_snake_case : Optional[Any] = parser.parse_args()
main(args)
| 421
| 1
|
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = '''\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n'''
_DESCRIPTION = '''\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n'''
_KWARGS_DESCRIPTION = '''\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for \'record\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'prediction_text\': the predicted answer text\n - for \'multirc\': list of question-answer dictionaries with the following keys:\n - \'idx\': index of the question-answer pair as specified by the dataset\n - \'prediction\': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for \'record\': list of question-answers dictionaries with the following keys:\n - \'idx\': index of the question as specified by the dataset\n - \'answers\': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for \'record\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1\': F1 score\n - for \'multirc\':\n - \'exact_match\': Exact match between answer and gold answer\n - \'f1_m\': Per-question macro-F1 score\n - \'f1_a\': Average F1 score over all answers\n - for \'axb\':\n \'matthews_correlation\': Matthew Correlation\n - for \'cb\':\n - \'accuracy\': Accuracy\n - \'f1\': F1 score\n - for all others:\n - \'accuracy\': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')\n >>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]\n >>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')\n >>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}\n\n >>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'''
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())
def acc_and_f1(preds, labels, f1_avg="binary"):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }
def evaluate_multirc(ids_preds, labels):
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = F'''{id_pred["idx"]["paragraph"]}-{id_pred["idx"]["question"]}'''
        pred = id_pred['''prediction''']
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average='''macro''')
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred['''prediction'''] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SuperGlue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"prediction_text": datasets.Value('''string''' ),
},
"references": {
"idx": {
"passage": datasets.Value('''int64''' ),
"query": datasets.Value('''int64''' ),
},
"answers": datasets.Sequence(datasets.Value('''string''' ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64''' ),
"paragraph": datasets.Value('''int64''' ),
"question": datasets.Value('''int64''' ),
},
"prediction": datasets.Value('''int64''' ),
},
"references": datasets.Value('''int64''' ),
}
else:
return {
"predictions": datasets.Value('''int64''' ),
"references": datasets.Value('''int64''' ),
}
    def _compute(self, predictions, references):
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(_snake_case , _snake_case )}
elif self.config_name == "cb":
return acc_and_fa(_snake_case , _snake_case , fa_avg='''macro''' )
elif self.config_name == "record":
UpperCAmelCase__ :int = [
{
'''qas''': [
{'''id''': ref['''idx''']['''query'''], '''answers''': [{'''text''': ans} for ans in ref['''answers''']]}
for ref in references
]
}
]
UpperCAmelCase__ :Tuple = {pred['''idx''']['''query''']: pred['''prediction_text'''] for pred in predictions}
return evaluate_record(_snake_case , _snake_case )[0]
elif self.config_name == "multirc":
return evaluate_multirc(_snake_case , _snake_case )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(_snake_case , _snake_case )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''' )
| 467
|
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester:
    def __init__(self, parent, batch_size=2, num_channels=3, image_size=4, patch_size=2, text_seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=36, num_hidden_layers=3, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, coordinate_size=6, shape_size=6, num_labels=3, num_choices=4, scope=None, range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMvaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, coordinate_size=self.coordinate_size, shape_size=self.shape_size, input_size=self.image_size, patch_size=self.patch_size, )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model(self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels):
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = LayoutLMvaForSequenceClassification(_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = model(
_snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = self.num_labels
UpperCAmelCase = LayoutLMvaForTokenClassification(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = model(
_snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , labels=_snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> str:
"""simple docstring"""
UpperCAmelCase = LayoutLMvaForQuestionAnswering(config=_snake_case )
model.to(_snake_case )
model.eval()
UpperCAmelCase = model(
_snake_case , bbox=_snake_case , pixel_values=_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , start_positions=_snake_case , end_positions=_snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def snake_case_ ( self ) -> str:
"""simple docstring"""
UpperCAmelCase = self.prepare_config_and_inputs()
        (
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
            UpperCAmelCase,
        ) = config_and_inputs
UpperCAmelCase = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class lowercase ( A__ , A__ , unittest.TestCase ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) -> Optional[Any]:
"""simple docstring"""
# `DocumentQuestionAnsweringPipeline` is expected to work with this model, but it combines the text and visual
# embedding along the sequence dimension (dim 1), which causes an error during post-processing as `p_mask` has
# the sequence dimension of the text embedding only.
# (see the line `embedding_output = torch.cat([embedding_output, visual_embeddings], dim=1)`)
return True
def snake_case_ ( self ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = LayoutLMvaModelTester(self )
UpperCAmelCase = ConfigTester(self , config_class=_snake_case , hidden_size=37 )
def snake_case_ ( self , _snake_case , _snake_case , _snake_case=False ) -> str:
"""simple docstring"""
UpperCAmelCase = copy.deepcopy(_snake_case )
if model_class in get_values(_snake_case ):
UpperCAmelCase = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(_snake_case , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_snake_case ):
UpperCAmelCase = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
elif model_class in get_values(_snake_case ):
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
            elif model_class in [*get_values(_snake_case )]:
                UpperCAmelCase = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
            elif model_class in [*get_values(_snake_case )]:
                UpperCAmelCase = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=_snake_case , )
return inputs_dict
def snake_case_ ( self ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def snake_case_ ( self ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase = type
self.model_tester.create_and_check_model(*_snake_case )
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_snake_case )
def snake_case_ ( self ) -> Dict:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_snake_case )
def snake_case_ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_snake_case )
@slow
def snake_case_ ( self ) -> Any:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase = LayoutLMvaModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class lowercase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case_ ( self ) -> Tuple:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=_snake_case ) if is_vision_available() else None
@slow
def snake_case_ ( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = LayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' ).to(_snake_case )
UpperCAmelCase = self.default_image_processor
UpperCAmelCase = prepare_img()
UpperCAmelCase = image_processor(images=_snake_case , return_tensors='''pt''' ).pixel_values.to(_snake_case )
UpperCAmelCase = torch.tensor([[1, 2]] )
UpperCAmelCase = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
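        # a single example with two text tokens, each paired with one (normalized) bounding box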
# forward pass
UpperCAmelCase = model(
input_ids=input_ids.to(_snake_case ) , bbox=bbox.to(_snake_case ) , pixel_values=pixel_values.to(_snake_case ) , )
# verify the logits
UpperCAmelCase = torch.Size((1, 199, 768) )
self.assertEqual(outputs.last_hidden_state.shape , _snake_case )
UpperCAmelCase = torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , _snake_case , atol=1e-4 ) )
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class __magic_name__ ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
UpperCamelCase_ = ProphetNetTokenizer
UpperCamelCase_ = False
def lowercase_ ( self ) -> List[Any]:
"""simple docstring"""
super().setUp()
_lowercase: Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
_lowercase: Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase_ ( self , A_ ) -> Dict:
"""simple docstring"""
_lowercase: Optional[int] = '''UNwant\u00E9d,running'''
_lowercase: int = '''unwanted, running'''
return input_text, output_text
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase: Dict = self.tokenizer_class(self.vocab_file )
_lowercase: List[Any] = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(A_ , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) , [9, 6, 7, 12, 10, 11] )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: List[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) , ['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: Optional[int] = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: int = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''h\u00E9llo'''] )
def lowercase_ ( self ) -> Tuple:
"""simple docstring"""
_lowercase: Union[str, Any] = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase_ ( self ) -> Union[str, Any]:
"""simple docstring"""
_lowercase: Dict = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''hello'''] )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
_lowercase: str = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase_ ( self ) -> Any:
"""simple docstring"""
_lowercase: List[Any] = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: Optional[Any] = BasicTokenizer(do_lower_case=A_ , strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) , ['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def lowercase_ ( self ) -> str:
"""simple docstring"""
_lowercase: List[Any] = BasicTokenizer(do_lower_case=A_ , never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) , ['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def lowercase_ ( self ) -> int:
"""simple docstring"""
_lowercase: Union[str, Any] = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
_lowercase: Union[str, Any] = {}
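        # build a token -> index vocab; WordpieceTokenizer greedily matches the longest known subword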
for i, token in enumerate(A_ ):
_lowercase: Union[str, Any] = i
_lowercase: Optional[int] = WordpieceTokenizer(vocab=A_ , unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) , [] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) , ['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) , ['''[UNK]''', '''runn''', '''##ing'''] )
@require_torch
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
_lowercase: Dict = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
_lowercase: Tuple = ['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
        _lowercase: List[Any] = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
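        # expected ids for the first sentence under the pretrained ProphetNet vocabulary (checked below)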
_lowercase: List[str] = tokenizer(A_ , padding=A_ , return_tensors='''pt''' )
self.assertIsInstance(A_ , A_ )
_lowercase: int = list(batch.input_ids.numpy()[0] )
self.assertListEqual(A_ , A_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
def lowercase_ ( self ) -> Dict:
"""simple docstring"""
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def lowercase_ ( self ) -> Optional[int]:
"""simple docstring"""
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def lowercase_ ( self ) -> List[str]:
"""simple docstring"""
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
@slow
def lowercase_ ( self ) -> Optional[Any]:
"""simple docstring"""
_lowercase: Tuple = self.tokenizer_class.from_pretrained('''microsoft/prophetnet-large-uncased''' )
_lowercase: Optional[Any] = tokenizer.encode('''sequence builders''' , add_special_tokens=A_ )
_lowercase: Tuple = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A_ )
_lowercase: int = tokenizer.build_inputs_with_special_tokens(A_ )
_lowercase: Dict = tokenizer.build_inputs_with_special_tokens(A_ , A_ )
assert encoded_sentence == text + [102]
assert encoded_pair == text + [102] + text_a + [102]
"""simple docstring"""
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
A__ : Tuple = pd.read_csv(
'https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/'
'position_salaries.csv'
)
A__ : Optional[int] = dataset.iloc[:, 1:2].values
A__ : Union[str, Any] = dataset.iloc[:, 2].values
A__ , A__ , A__ , A__ : Dict = train_test_split(X, y, test_size=0.2, random_state=0)
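# Polynomial regression = ordinary least squares fitted on a degree-4 polynomial expansion of X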
A__ : Union[str, Any] = PolynomialFeatures(degree=4)
A__ : List[Any] = poly_reg.fit_transform(X)
A__ : Dict = LinearRegression()
pol_reg.fit(X_poly, y)
def _lowerCAmelCase ( ):
"""simple docstring"""
plt.scatter(_UpperCamelCase , _UpperCamelCase , color='''red''' )
plt.plot(_UpperCamelCase , pol_reg.predict(poly_reg.fit_transform(_UpperCamelCase ) ) , color='''blue''' )
    plt.title('''Truth or Bluff (Polynomial Regression)''' )
plt.xlabel('''Position level''' )
plt.ylabel('''Salary''' )
plt.show()
if __name__ == "__main__":
viz_polymonial()
    # Predicting a new result with Polynomial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCAmelCase__ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class lowercase_ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self : List[Any] ,lowercase__ : Path ,lowercase__ : Union[str, None] = None ,lowercase__ : Union[List[str], None] = None ,lowercase__ : Union[str, List[str], None] = None ,lowercase__ : bool = True ,):
__lowercase = [file for file in os.listdir(lowercase__ ) if os.path.isfile(os.path.join(lowercase__ ,lowercase__ ) )]
if identifier is not None:
__lowercase = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(lowercase__ ,lowercase__ ):
for n_ in n_identifier:
__lowercase = [file for file in files if n_ not in file]
else:
__lowercase = [file for file in files if n_identifier not in file]
__lowercase = ignore_files or []
ignore_files.append('''__init__.py''' )
__lowercase = [file for file in files if file not in ignore_files]
for file in files:
            # Run the doctests contained in each remaining file
print('''Testing''' ,lowercase__ )
if only_modules:
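                # import the module and run its docstring examples as a unittest suite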
__lowercase = file.split('''.''' )[0]
try:
__lowercase = getattr(lowercase__ ,lowercase__ )
__lowercase = doctest.DocTestSuite(lowercase__ )
__lowercase = unittest.TextTestRunner().run(lowercase__ )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"{module_identifier} is not a module." )
else:
__lowercase = doctest.testfile(str('''..''' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def SCREAMING_SNAKE_CASE ( self : Optional[int] ):
__lowercase = Path('''src/transformers''' )
__lowercase = '''modeling'''
__lowercase = [
'''modeling_ctrl.py''',
'''modeling_tf_ctrl.py''',
]
self.analyze_directory(lowercase__ ,identifier=lowercase__ ,ignore_files=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ):
__lowercase = Path('''src/transformers''' )
__lowercase = '''tokenization'''
self.analyze_directory(lowercase__ ,identifier=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
__lowercase = Path('''src/transformers''' )
__lowercase = '''configuration'''
self.analyze_directory(lowercase__ ,identifier=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ):
__lowercase = Path('''src/transformers''' )
__lowercase = ['''configuration''', '''modeling''', '''tokenization''']
self.analyze_directory(lowercase__ ,n_identifier=lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ):
__lowercase = Path('''docs/source''' )
__lowercase = ['''favicon.ico''']
self.analyze_directory(lowercase__ ,ignore_files=lowercase__ ,only_modules=lowercase__ )
'''simple docstring'''
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''KEY''')
lowerCAmelCase__ = TypeVar('''VAL''')
@dataclass(frozen=lowerCamelCase__ , slots=lowerCamelCase__ )
class lowercase_ (Generic[KEY, VAL] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : KEY
SCREAMING_SNAKE_CASE : VAL
class lowercase_ (_Item ):
"""simple docstring"""
def __init__( self : Optional[int] ):
        super().__init__(None ,None )
def __bool__( self : List[str] ):
return False
lowerCAmelCase__ = _DeletedItem()
class lowercase_ (MutableMapping[KEY, VAL] ):
"""simple docstring"""
def __init__( self : Dict ,lowercase__ : int = 8 ,lowercase__ : float = 0.7_5 ):
__lowercase = initial_block_size
__lowercase = [None] * initial_block_size
assert 0.0 < capacity_factor < 1.0
__lowercase = capacity_factor
__lowercase = 0
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : KEY ):
return hash(lowercase__ ) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : Dict ,lowercase__ : int ):
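        # linear probing: advance to the next bucket index, wrapping around the table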
return (ind + 1) % len(self._buckets )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : int ,lowercase__ : KEY ,lowercase__ : VAL ):
__lowercase = self._buckets[ind]
if not stored:
__lowercase = _Item(lowercase__ ,lowercase__ )
self._len += 1
return True
elif stored.key == key:
__lowercase = _Item(lowercase__ ,lowercase__ )
return True
else:
return False
def SCREAMING_SNAKE_CASE ( self : Dict ):
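        # the table counts as full once the load factor reaches the configured capacity_factor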
__lowercase = len(self._buckets ) * self._capacity_factor
return len(self ) >= int(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : int ):
if len(self._buckets ) <= self._initial_block_size:
return False
__lowercase = len(self._buckets ) * self._capacity_factor / 2
return len(self ) < limit
def SCREAMING_SNAKE_CASE ( self : Any ,lowercase__ : int ):
__lowercase = self._buckets
__lowercase = [None] * new_size
__lowercase = 0
for item in old_buckets:
if item:
self._add_item(item.key ,item.val )
def SCREAMING_SNAKE_CASE ( self : str ):
self._resize(len(self._buckets ) * 2 )
def SCREAMING_SNAKE_CASE ( self : Tuple ):
self._resize(len(self._buckets ) // 2 )
def SCREAMING_SNAKE_CASE ( self : int ,lowercase__ : KEY ):
__lowercase = self._get_bucket_index(lowercase__ )
for _ in range(len(self._buckets ) ):
yield ind
__lowercase = self._get_next_ind(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
for ind in self._iterate_buckets(lowercase__ ):
if self._try_set(lowercase__ ,lowercase__ ,lowercase__ ):
break
def __setitem__( self : str ,lowercase__ : KEY ,lowercase__ : VAL ):
if self._is_full():
self._size_up()
self._add_item(lowercase__ ,lowercase__ )
def __delitem__( self : Tuple ,lowercase__ : KEY ):
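        # lazy deletion: the slot is overwritten with the _deleted tombstone so probe chains stay intact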
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
raise KeyError(lowercase__ )
if item is _deleted:
continue
if item.key == key:
__lowercase = _deleted
self._len -= 1
break
if self._is_sparse():
self._size_down()
def __getitem__( self : Tuple ,lowercase__ : KEY ):
for ind in self._iterate_buckets(lowercase__ ):
__lowercase = self._buckets[ind]
if item is None:
break
if item is _deleted:
continue
if item.key == key:
return item.val
raise KeyError(lowercase__ )
def __len__( self : Optional[int] ):
return self._len
def __iter__( self : str ):
yield from (item.key for item in self._buckets if item)
def __repr__( self : Optional[Any] ):
        __lowercase = ''', '''.join(
F"{item.key}: {item.val}" for item in self._buckets if item )
return F"HashMap({val_string})"
"""simple docstring"""
import random
import sys
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
__snake_case : int = "Usage of script: script_name <size_of_canvas:int>"
__snake_case : Optional[Any] = [0] * 100 + [1] * 10
random.shuffle(choice)
def _lowercase ( __snake_case ) -> list[list[bool]]:
__lowerCAmelCase : str = [[False for i in range(__lowerCAmelCase )] for j in range(__lowerCAmelCase )]
return canvas
def _lowercase ( __snake_case ) -> None:
for i, row in enumerate(__lowerCAmelCase ):
for j, _ in enumerate(__lowerCAmelCase ):
__lowerCAmelCase : str = bool(random.getrandbits(1 ) )
def _lowercase ( __snake_case ) -> list[list[bool]]:
__lowerCAmelCase : List[str] = np.array(__lowerCAmelCase )
__lowerCAmelCase : Optional[Any] = np.array(create_canvas(current_canvas.shape[0] ) )
for r, row in enumerate(__lowerCAmelCase ):
for c, pt in enumerate(__lowerCAmelCase ):
__lowerCAmelCase : Dict = __judge_point(
__lowerCAmelCase ,current_canvas[r - 1 : r + 2, c - 1 : c + 2] )
__lowerCAmelCase : int = next_gen_canvas
del next_gen_canvas # cleaning memory as we move on.
__lowerCAmelCase : Any = current_canvas.tolist()
return return_canvas
def _lowercase ( __snake_case ,__snake_case ) -> bool:
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[int] = 0
# finding dead or alive neighbours count.
for i in neighbours:
for status in i:
if status:
alive += 1
else:
dead += 1
# handling duplicate entry for focus pt.
if pt:
alive -= 1
else:
dead -= 1
# running the rules of game here.
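    # Conway's rules: a live cell survives with 2 or 3 live neighbours; a dead cell is born with exactly 3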
__lowerCAmelCase : int = pt
if pt:
if alive < 2:
__lowerCAmelCase : List[Any] = False
elif alive == 2 or alive == 3:
__lowerCAmelCase : Any = True
elif alive > 3:
__lowerCAmelCase : Any = False
else:
if alive == 3:
__lowerCAmelCase : Tuple = True
return state
if __name__ == "__main__":
if len(sys.argv) != 2:
raise Exception(usage_doc)
__snake_case : Any = int(sys.argv[1])
# main working structure of this module.
__snake_case : int = create_canvas(canvas_size)
seed(c)
__snake_case : str = plt.subplots()
fig.show()
__snake_case : Any = ListedColormap(['w', 'k'])
try:
while True:
__snake_case : List[Any] = run(c)
ax.matshow(c, cmap=cmap)
fig.canvas.draw()
ax.cla()
except KeyboardInterrupt:
# do nothing.
pass
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__snake_case : Any = True
except (ImportError, ModuleNotFoundError):
__snake_case : Optional[int] = False
if NLTK_AVAILABLE:
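    # serialize the one-time punkt download so concurrent processes do not race on the shared cache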
with FileLock('.lock') as lock:
nltk.download('punkt', quiet=True)
def _lowercase ( __snake_case ) -> str:
re.sub("<n>" ,"" ,__snake_case ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(__snake_case ) )
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class __magic_name__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=1 , lowerCamelCase=False , **lowerCamelCase ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__A : Optional[int] = vocab_size
__A : Dict = d_embed
__A : Tuple = d_proj
__A : Union[str, Any] = cutoffs + [vocab_size]
__A : Any = [0] + self.cutoffs
__A : Tuple = div_val
__A : Any = self.cutoffs[0]
__A : Union[str, Any] = len(self.cutoffs ) - 1
__A : Optional[Any] = self.shortlist_size + self.n_clusters
__A : Union[str, Any] = keep_order
__A : List[Any] = []
__A : Union[str, Any] = []
def lowerCAmelCase__ ( self , lowerCamelCase ):
'''simple docstring'''
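        # build() lazily creates the cluster, projection and output weights; with div_val > 1 each
        # successive cluster uses a smaller embedding dimension (d_embed // div_val**i)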
if self.n_clusters > 0:
__A : str = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=lowerCamelCase , name="cluster_weight" )
__A : Optional[int] = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=lowerCamelCase , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
__A : Any = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=lowerCamelCase , name=f"out_projs_._{i}" , )
self.out_projs.append(lowerCamelCase )
else:
self.out_projs.append(lowerCamelCase )
__A : int = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._weight" , )
__A : Union[str, Any] = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
__A ,__A : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
__A : Optional[int] = self.d_embed // (self.div_val**i)
__A : int = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=lowerCamelCase , name=f"out_projs_._{i}" )
self.out_projs.append(lowerCamelCase )
__A : Optional[int] = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._weight" , )
__A : Optional[int] = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=lowerCamelCase , name=f"out_layers_._{i}_._bias" , )
self.out_layers.append((weight, bias) )
super().build(lowerCamelCase )
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase=None ):
'''simple docstring'''
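        # optionally project the hidden states, then compute logits as x @ W^T + b via einsum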
__A : List[Any] = x
if proj is not None:
__A : Union[str, Any] = tf.einsum("ibd,ed->ibe" , lowerCamelCase , lowerCamelCase )
return tf.einsum("ibd,nd->ibn" , lowerCamelCase , lowerCamelCase ) + b
@staticmethod
def lowerCAmelCase__ ( lowerCamelCase , lowerCamelCase ):
'''simple docstring'''
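        # gather each target token's log-probability out of the [length, vocab] matrix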
__A : List[str] = shape_list(lowerCamelCase )
__A : Any = tf.range(lp_size[0] , dtype=target.dtype )
__A : int = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self , lowerCamelCase , lowerCamelCase , lowerCamelCase=True , lowerCamelCase=False ):
'''simple docstring'''
__A : Optional[int] = 0
if self.n_clusters == 0:
__A : Dict = self._logit(lowerCamelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
__A : Dict = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCamelCase , logits=lowerCamelCase )
__A : Dict = tf.nn.log_softmax(lowerCamelCase , axis=-1 )
else:
__A : Optional[Any] = shape_list(lowerCamelCase )
__A : Optional[int] = []
__A : List[Any] = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
__A ,__A : Tuple = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
__A : Tuple = (target >= l_idx) & (target < r_idx)
__A : Union[str, Any] = tf.where(lowerCamelCase )
__A : Any = tf.boolean_mask(lowerCamelCase , lowerCamelCase ) - l_idx
if self.div_val == 1:
__A : List[str] = self.out_layers[0][0][l_idx:r_idx]
__A : str = self.out_layers[0][1][l_idx:r_idx]
else:
__A : Union[str, Any] = self.out_layers[i][0]
__A : Tuple = self.out_layers[i][1]
if i == 0:
__A : Optional[int] = tf.concat([cur_W, self.cluster_weight] , 0 )
__A : Tuple = tf.concat([cur_b, self.cluster_bias] , 0 )
__A : Dict = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[0] )
__A : Tuple = tf.nn.log_softmax(lowerCamelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
__A : Union[str, Any] = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__A : List[str] = self._gather_logprob(lowerCamelCase , lowerCamelCase )
else:
__A : List[str] = self._logit(lowerCamelCase , lowerCamelCase , lowerCamelCase , self.out_projs[i] )
__A : Any = tf.nn.log_softmax(lowerCamelCase )
__A : List[Any] = self.cutoffs[0] + i - 1 # No probability for the head cluster
__A : Union[str, Any] = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCamelCase )
if target is not None:
__A : Optional[Any] = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__A : Dict = tf.boolean_mask(lowerCamelCase , lowerCamelCase )
__A : Optional[Any] = self._gather_logprob(lowerCamelCase , lowerCamelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCamelCase , -cur_logprob , shape_list(lowerCamelCase ) )
__A : Union[str, Any] = tf.concat(lowerCamelCase , axis=-1 )
if target is not None:
if return_mean:
__A : Dict = tf.reduce_mean(lowerCamelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCamelCase )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference).
self.add_metric(lowerCamelCase , name=self.name , aggregation="mean" if return_mean else "" )
return out
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __magic_name__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : int = tempfile.mkdtemp()
__A : List[Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
__A : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
__A : List[str] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073],
"image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
        __A : Union[str, Any] = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self , **lowerCamelCase ):
'''simple docstring'''
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
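        # one random 3x30x400 uint8 image, moved to channels-last and wrapped in a PIL image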
        __A : int = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__A : int = [Image.fromarray(np.moveaxis(lowerCamelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Union[str, Any] = self.get_tokenizer()
__A : int = self.get_rust_tokenizer()
__A : List[str] = self.get_image_processor()
__A : List[Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__A : List[Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCamelCase )
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__A : List[str] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer , lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : List[Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__A : List[str] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
__A : int = self.get_image_processor(do_normalize=lowerCamelCase , padding_value=1.0 )
__A : Optional[int] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowerCamelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[Any] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : List[str] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = self.prepare_image_inputs()
__A : Optional[int] = image_processor(lowerCamelCase , return_tensors="np" )
__A : Dict = processor(images=lowerCamelCase , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : int = self.get_tokenizer()
__A : Optional[int] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[Any] = "lower newer"
__A : Union[str, Any] = processor(text=lowerCamelCase )
__A : int = tokenizer(lowerCamelCase , padding="max_length" , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Optional[int] = self.get_image_processor()
__A : Optional[Any] = self.get_tokenizer()
__A : int = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Tuple = "lower newer"
__A : int = self.prepare_image_inputs()
__A : str = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "token_type_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : Dict = self.get_image_processor()
__A : Tuple = self.get_tokenizer()
__A : Dict = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__A : Optional[int] = processor.batch_decode(lowerCamelCase )
__A : Optional[int] = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase , lowerCamelCase )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
__A : str = self.get_image_processor()
__A : Union[str, Any] = self.get_tokenizer()
__A : Union[str, Any] = AlignProcessor(tokenizer=lowerCamelCase , image_processor=lowerCamelCase )
__A : int = "lower newer"
__A : str = self.prepare_image_inputs()
__A : Tuple = processor(text=lowerCamelCase , images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
}
UpperCAmelCase_ = {
"vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
"merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}
UpperCAmelCase_ = {
    "ctrl": 256,
}
UpperCAmelCase_ = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}
def UpperCAmelCase__ ( _SCREAMING_SNAKE_CASE : Tuple )->Union[str, Any]:
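    # collect the set of adjacent symbol pairs in a word -- the candidate BPE merges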
_lowerCAmelCase = set()
_lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase = char
_lowerCAmelCase = set(_SCREAMING_SNAKE_CASE )
return pairs
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ = CONTROL_CODES
def __init__( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase="<unk>" , **_lowerCAmelCase ):
super().__init__(unk_token=_lowerCAmelCase , **_lowerCAmelCase )
with open(_lowerCAmelCase , encoding='''utf-8''' ) as vocab_handle:
_lowerCAmelCase = json.load(_lowerCAmelCase )
_lowerCAmelCase = {v: k for k, v in self.encoder.items()}
with open(_lowerCAmelCase , encoding='''utf-8''' ) as merges_handle:
_lowerCAmelCase = merges_handle.read().split('''\n''' )[1:-1]
_lowerCAmelCase = [tuple(merge.split() ) for merge in merges]
_lowerCAmelCase = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) )
_lowerCAmelCase = {}
@property
def __lowerCAmelCase ( self ):
return len(self.encoder )
def __lowerCAmelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
if token in self.cache:
return self.cache[token]
_lowerCAmelCase = tuple(_lowerCAmelCase )
_lowerCAmelCase = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] )
_lowerCAmelCase = get_pairs(_lowerCAmelCase )
if not pairs:
return token
while True:
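            # repeatedly apply the lowest-rank (highest-priority) merge until no known pair remains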
_lowerCAmelCase = min(_lowerCAmelCase , key=lambda _lowerCAmelCase : self.bpe_ranks.get(_lowerCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase , _lowerCAmelCase = bigram
_lowerCAmelCase = []
_lowerCAmelCase = 0
while i < len(_lowerCAmelCase ):
try:
_lowerCAmelCase = word.index(_lowerCAmelCase , _lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase = j
if word[i] == first and i < len(_lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase = tuple(_lowerCAmelCase )
_lowerCAmelCase = new_word
if len(_lowerCAmelCase ) == 1:
break
else:
_lowerCAmelCase = get_pairs(_lowerCAmelCase )
_lowerCAmelCase = '''@@ '''.join(_lowerCAmelCase )
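        # drop the trailing '</w>' end-of-word marker (4 characters) appended before merging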
_lowerCAmelCase = word[:-4]
_lowerCAmelCase = word
return word
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = []
_lowerCAmelCase = re.findall(R'''\S+\n?''' , _lowerCAmelCase )
for token in words:
split_tokens.extend(list(self.bpe(_lowerCAmelCase ).split(''' ''' ) ) )
return split_tokens
def __lowerCAmelCase ( self , _lowerCAmelCase ):
return self.encoder.get(_lowerCAmelCase , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
return self.decoder.get(_lowerCAmelCase , self.unk_token )
def __lowerCAmelCase ( self , _lowerCAmelCase ):
_lowerCAmelCase = ''' '''.join(_lowerCAmelCase ).replace('''@@ ''' , '''''' ).strip()
return out_string
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase = None ):
if not os.path.isdir(_lowerCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
_lowerCAmelCase = os.path.join(
_lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
_lowerCAmelCase = os.path.join(
_lowerCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_lowerCAmelCase , ensure_ascii=_lowerCAmelCase ) + '''\n''' )
_lowerCAmelCase = 0
with open(_lowerCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _lowerCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
F'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
''' Please check that the tokenizer is not corrupted!''' )
_lowerCAmelCase = token_index
writer.write(''' '''.join(_lowerCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class UpperCAmelCase ( snake_case_ ,unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = DiTPipeline
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE__ = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
SCREAMING_SNAKE_CASE__ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
SCREAMING_SNAKE_CASE__ = False
def __lowerCAmelCase ( self ):
torch.manual_seed(0 )
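        # fixed seed so the randomly initialized tiny components stay reproducible across runs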
        _lowerCAmelCase = Transformer2DModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_lowerCAmelCase , activation_fn='''gelu-approximate''' , num_embeds_ada_norm=1000 , norm_type='''ada_norm_zero''' , norm_elementwise_affine=_lowerCAmelCase , )
_lowerCAmelCase = AutoencoderKL()
_lowerCAmelCase = DDIMScheduler()
_lowerCAmelCase = {'''transformer''': transformer.eval(), '''vae''': vae.eval(), '''scheduler''': scheduler}
return components
def __lowerCAmelCase ( self , _lowerCAmelCase , _lowerCAmelCase=0 ):
if str(_lowerCAmelCase ).startswith('''mps''' ):
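            # MPS does not support device-local generators, so fall back to the global CPU RNG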
_lowerCAmelCase = torch.manual_seed(_lowerCAmelCase )
else:
_lowerCAmelCase = torch.Generator(device=_lowerCAmelCase ).manual_seed(_lowerCAmelCase )
_lowerCAmelCase = {
'''class_labels''': [1],
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def __lowerCAmelCase ( self ):
_lowerCAmelCase = '''cpu'''
_lowerCAmelCase = self.get_dummy_components()
_lowerCAmelCase = self.pipeline_class(**_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowerCAmelCase = self.get_dummy_inputs(_lowerCAmelCase )
_lowerCAmelCase = pipe(**_lowerCAmelCase ).images
_lowerCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
        _lowerCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_lowerCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_lowerCAmelCase , 1E-3 )
def __lowerCAmelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_lowerCAmelCase , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCAmelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class UpperCAmelCase ( unittest.TestCase ):
def __lowerCAmelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ):
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-256''' )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''', '''white shark''', '''white wolf''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=40 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
F'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCAmelCase ( self ):
_lowerCAmelCase = DiTPipeline.from_pretrained('''facebook/DiT-XL-2-512''' )
_lowerCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('''cuda''' )
_lowerCAmelCase = ['''vase''', '''umbrella''']
_lowerCAmelCase = pipe.get_label_ids(_lowerCAmelCase )
_lowerCAmelCase = torch.manual_seed(0 )
_lowerCAmelCase = pipe(_lowerCAmelCase , generator=_lowerCAmelCase , num_inference_steps=25 , output_type='''np''' ).images
for word, image in zip(_lowerCAmelCase , _lowerCAmelCase ):
_lowerCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
F'''/dit/{word}_512.npy''' )
assert np.abs((expected_image - image).max() ) < 1E-1
def lowerCamelCase__ ( _a , _a , _a=False):
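    # Jaccard similarity |A & B| / |A | B|; with alternative_union the denominator is len(A) + len(B)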
if isinstance(_a , _a) and isinstance(_a , _a):
SCREAMING_SNAKE_CASE : Tuple = len(set_a.intersection(_a))
if alternative_union:
SCREAMING_SNAKE_CASE : Dict = len(_a) + len(_a)
else:
SCREAMING_SNAKE_CASE : str = len(set_a.union(_a))
return intersection / union
if isinstance(_a , (list, tuple)) and isinstance(_a , (list, tuple)):
SCREAMING_SNAKE_CASE : Dict = [element for element in set_a if element in set_b]
if alternative_union:
SCREAMING_SNAKE_CASE : List[Any] = len(_a) + len(_a)
return len(_a) / union
else:
SCREAMING_SNAKE_CASE : Optional[Any] = set_a + [element for element in set_b if element not in set_a]
return len(_a) / len(_a)
    return None
if __name__ == "__main__":
a_ = {'a', 'b', 'c', 'd', 'e'}
a_ = {'c', 'd', 'e', 'f', 'h', 'i'}
print(jaccard_similarity(set_a, set_b))
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
    UNet2DConditionModel,
    UNet2DModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        UpperCAmelCase = UNet2DModel(
            sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        UpperCAmelCase = UNet2DConditionModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def _lowercase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        UpperCAmelCase = AutoencoderKL(
            sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
        UpperCAmelCase = UNet2DModel(
            sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = '''cpu''' # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCAmelCase = DDPMScheduler()
UpperCAmelCase = AudioDiffusionPipeline(vqvae=_A , unet=self.dummy_unet , mel=_A , scheduler=_A )
UpperCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(4_2 )
UpperCAmelCase = pipe(generator=_A , steps=4 )
UpperCAmelCase = output.audios[0]
UpperCAmelCase = output.images[0]
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(4_2 )
UpperCAmelCase = pipe(generator=_A , steps=4 , return_dict=_A )
UpperCAmelCase = output[0][0]
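        # the generated waveform spans (time-axis resolution - 1) * hop_length samples, matching the mel geometry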
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
        UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        UpperCAmelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
        UpperCAmelCase = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCAmelCase = DDIMScheduler()
UpperCAmelCase = self.dummy_vqvae_and_unet
UpperCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_A , scheduler=_A )
UpperCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
np.random.seed(0 )
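        # deterministic raw audio for the start_step (image-to-image style) denoising branch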
UpperCAmelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(4_2 )
UpperCAmelCase = pipe(raw_audio=_A , generator=_A , start_step=5 , steps=1_0 )
UpperCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
        UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        UpperCAmelCase = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase = self.dummy_unet_condition
UpperCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_A , mel=_A , scheduler=_A )
UpperCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
np.random.seed(0 )
UpperCAmelCase = torch.rand((1, 1, 1_0) )
UpperCAmelCase = pipe(generator=_A , encoding=_A )
UpperCAmelCase = output.images[0]
        UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
        UpperCAmelCase = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class A_ (unittest.TestCase ):
def _lowercase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self ):
'''simple docstring'''
UpperCAmelCase = torch_device
UpperCAmelCase = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
UpperCAmelCase = pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase = torch.Generator(device=_A ).manual_seed(4_2 )
UpperCAmelCase = pipe(generator=_A )
UpperCAmelCase = output.audios[0]
UpperCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0]
UpperCAmelCase = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
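# Note (illustrative, not part of the original test): the audio-length assertion
# above reflects the Mel round-trip -- a spectrogram of width W decodes to
# (W - 1) * hop_length samples, so a 256-wide image with the assumed default
# hop_length of 512 yields 255 * 512 = 130_560 mono samples.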
| 130
| 0
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class __lowerCAmelCase( lowerCAmelCase__ ):
def __init__( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Union[str, Any] = []
def _lowercase ( self : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
self.events.append('on_init_end' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
self.events.append('on_train_begin' )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Dict , SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
self.events.append('on_train_end' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
self.events.append('on_epoch_begin' )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Optional[int] , **SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
self.events.append('on_epoch_end' )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
self.events.append('on_step_begin' )
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , **SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
self.events.append('on_step_end' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
self.events.append('on_evaluate' )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Tuple , SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
self.events.append('on_predict' )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE : List[Any] , SCREAMING_SNAKE_CASE : List[str] , SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
self.events.append('on_save' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Dict , **SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
self.events.append('on_log' )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Optional[Any] , **SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
self.events.append('on_prediction_step' )
@require_torch
class __lowerCAmelCase( unittest.TestCase ):
def _lowercase ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Union[str, Any] = tempfile.mkdtemp()
def _lowercase ( self : Any ):
"""simple docstring"""
shutil.rmtree(self.output_dir )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Union[str, Any]=0 , SCREAMING_SNAKE_CASE : int=0 , SCREAMING_SNAKE_CASE : List[str]=64 , SCREAMING_SNAKE_CASE : int=64 , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : Union[str, Any]=False , **SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = RegressionDataset(length=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = RegressionDataset(length=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = RegressionModelConfig(a=SCREAMING_SNAKE_CASE , b=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :List[str] = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE , report_to=[] , **SCREAMING_SNAKE_CASE )
return Trainer(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , callbacks=SCREAMING_SNAKE_CASE , )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
# Order doesn't matter
SCREAMING_SNAKE_CASE_ :Tuple = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
SCREAMING_SNAKE_CASE_ :List[str] = sorted(SCREAMING_SNAKE_CASE , key=lambda SCREAMING_SNAKE_CASE : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(SCREAMING_SNAKE_CASE , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE )
else:
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = ['on_init_end', 'on_train_begin']
SCREAMING_SNAKE_CASE_ :List[str] = 0
SCREAMING_SNAKE_CASE_ :Any = len(trainer.get_eval_dataloader() )
SCREAMING_SNAKE_CASE_ :List[str] = ['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(SCREAMING_SNAKE_CASE ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
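# Illustrative trace (assumption: 1 epoch, 2 steps, logging_steps=1, no eval or
# save step triggered): the list built above would be
# ['on_init_end', 'on_train_begin', 'on_epoch_begin',
#  'on_step_begin', 'on_step_end', 'on_log',
#  'on_step_begin', 'on_step_end', 'on_log',
#  'on_epoch_end', 'on_log', 'on_train_end']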
def _lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Optional[int] = self.get_trainer()
SCREAMING_SNAKE_CASE_ :Optional[int] = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
# Callbacks passed at init are added to the default callbacks
SCREAMING_SNAKE_CASE_ :Optional[int] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
SCREAMING_SNAKE_CASE_ :List[str] = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
def _lowercase ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Tuple = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
SCREAMING_SNAKE_CASE_ :Optional[Any] = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE )
expected_callbacks.remove(SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :int = self.get_trainer()
SCREAMING_SNAKE_CASE_ :int = trainer.pop_callback(SCREAMING_SNAKE_CASE )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
trainer.add_callback(SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
# We can also add, pop, or remove by instance
SCREAMING_SNAKE_CASE_ :str = self.get_trainer()
SCREAMING_SNAKE_CASE_ :str = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE )
expected_callbacks.remove(SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Dict = self.get_trainer()
SCREAMING_SNAKE_CASE_ :str = trainer.callback_handler.callbacks[0]
SCREAMING_SNAKE_CASE_ :Optional[int] = trainer.pop_callback(SCREAMING_SNAKE_CASE )
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
trainer.add_callback(SCREAMING_SNAKE_CASE )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE )
def _lowercase ( self : Tuple ):
"""simple docstring"""
import warnings
# XXX: for now, ignore scatter_gather warnings in this test since they're not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
SCREAMING_SNAKE_CASE_ :List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE , self.get_expected_events(SCREAMING_SNAKE_CASE ) )
# Independent log/save/eval
SCREAMING_SNAKE_CASE_ :List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
SCREAMING_SNAKE_CASE_ :int = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE , self.get_expected_events(SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
SCREAMING_SNAKE_CASE_ :Tuple = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE , self.get_expected_events(SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ :List[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
SCREAMING_SNAKE_CASE_ :Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE , self.get_expected_events(SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ :Optional[Any] = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
SCREAMING_SNAKE_CASE_ :List[str] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE , self.get_expected_events(SCREAMING_SNAKE_CASE ) )
# A bit of everything
SCREAMING_SNAKE_CASE_ :str = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
SCREAMING_SNAKE_CASE_ :Optional[int] = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE , self.get_expected_events(SCREAMING_SNAKE_CASE ) )
# A warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
SCREAMING_SNAKE_CASE_ :Optional[int] = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE ) in warn_mock.call_args[0][0]
| 233
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE__ : str = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase( lowerCAmelCase__ ):
__snake_case : Dict = ['pixel_values']
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Union[int, float] = 1 / 255 , SCREAMING_SNAKE_CASE : bool = True , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : bool = True , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = size if size is not None else {'shortest_edge': 224}
SCREAMING_SNAKE_CASE_ :List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Optional[Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ :List[str] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE , param_name='crop_size' )
SCREAMING_SNAKE_CASE_ :Dict = do_resize
SCREAMING_SNAKE_CASE_ :Any = size
SCREAMING_SNAKE_CASE_ :str = resample
SCREAMING_SNAKE_CASE_ :Any = do_center_crop
SCREAMING_SNAKE_CASE_ :int = crop_size
SCREAMING_SNAKE_CASE_ :Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE_ :Tuple = rescale_factor
SCREAMING_SNAKE_CASE_ :Tuple = do_normalize
SCREAMING_SNAKE_CASE_ :List[Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
SCREAMING_SNAKE_CASE_ :List[str] = image_std if image_std is not None else OPENAI_CLIP_STD
SCREAMING_SNAKE_CASE_ :List[Any] = do_convert_rgb
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : PILImageResampling = PILImageResampling.BICUBIC , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Tuple , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Union[str, Any] = get_size_dict(SCREAMING_SNAKE_CASE , default_to_square=SCREAMING_SNAKE_CASE )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
SCREAMING_SNAKE_CASE_ :Optional[Any] = get_resize_output_image_size(SCREAMING_SNAKE_CASE , size=size['shortest_edge'] , default_to_square=SCREAMING_SNAKE_CASE )
return resize(SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Dict[str, int] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :int = get_size_dict(SCREAMING_SNAKE_CASE )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(SCREAMING_SNAKE_CASE , size=(size['height'], size['width']) , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[int, float] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Optional[int] , ):
"""simple docstring"""
return rescale(SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : np.ndarray , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Union[float, List[float]] , SCREAMING_SNAKE_CASE : Optional[Union[str, ChannelDimension]] = None , **SCREAMING_SNAKE_CASE : Union[str, Any] , ):
"""simple docstring"""
return normalize(SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE , data_format=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _lowercase ( self : Optional[int] , SCREAMING_SNAKE_CASE : ImageInput , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Dict[str, int] = None , SCREAMING_SNAKE_CASE : PILImageResampling = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : int = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : float = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : Optional[Union[float, List[float]]] = None , SCREAMING_SNAKE_CASE : bool = None , SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , SCREAMING_SNAKE_CASE : Optional[ChannelDimension] = ChannelDimension.FIRST , **SCREAMING_SNAKE_CASE : List[Any] , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :List[Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ :Optional[int] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ :List[Any] = get_size_dict(SCREAMING_SNAKE_CASE , param_name='size' , default_to_square=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ :List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ :Any = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ :Dict = get_size_dict(SCREAMING_SNAKE_CASE , param_name='crop_size' , default_to_square=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Tuple = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ :List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ :Any = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ :str = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ :Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ :Optional[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
SCREAMING_SNAKE_CASE_ :Dict = make_list_of_images(SCREAMING_SNAKE_CASE )
if not valid_images(SCREAMING_SNAKE_CASE ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
SCREAMING_SNAKE_CASE_ :str = [convert_to_rgb(SCREAMING_SNAKE_CASE ) for image in images]
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ :str = [to_numpy_array(SCREAMING_SNAKE_CASE ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ :Optional[Any] = [self.resize(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE , resample=SCREAMING_SNAKE_CASE ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ :Tuple = [self.center_crop(image=SCREAMING_SNAKE_CASE , size=SCREAMING_SNAKE_CASE ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ :List[str] = [self.rescale(image=SCREAMING_SNAKE_CASE , scale=SCREAMING_SNAKE_CASE ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ :List[Any] = [self.normalize(image=SCREAMING_SNAKE_CASE , mean=SCREAMING_SNAKE_CASE , std=SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE_ :str = [to_channel_dimension_format(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for image in images]
SCREAMING_SNAKE_CASE_ :Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=SCREAMING_SNAKE_CASE , tensor_type=SCREAMING_SNAKE_CASE )
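# Minimal usage sketch (illustrative; assumes a PIL RGB image `img` and that the
# image-processor class defined above is instantiated with its defaults):
#   processor = <image-processor class above>()
#   batch = processor(images=img, return_tensors='np')
#   batch['pixel_values'].shape  # expected (1, 3, 224, 224) from the 224x224 crop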
| 233
| 1
|
from math import sqrt
def A__ ( SCREAMING_SNAKE_CASE_ : int ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, and all multiples of 3 are not prime
return False
# All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(sqrt(SCREAMING_SNAKE_CASE_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
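# Why 6k +/- 1 suffices (sketch): any integer can be written as 6k + r with
# r in {0, 1, 2, 3, 4, 5}; the cases r in {0, 2, 3, 4} are divisible by 2 or 3
# and were already rejected above, so only 6k - 1 and 6k + 1 candidates remain
# to be trial-divided -- hence the loop checks i and i + 2 for i = 5, 11, 17, ...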
def A__ ( SCREAMING_SNAKE_CASE_ : int = 1_00_01 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 1
while count != nth and number < 3:
number += 1
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
while count != nth:
number += 2
if is_prime(SCREAMING_SNAKE_CASE_ ):
count += 1
return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 32
|
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
__magic_name__ :str = XCLIPTextConfig()
# derive patch size from model name
__magic_name__ :Union[str, Any] = model_name.find('''patch''' )
__magic_name__ :Optional[Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] )
__magic_name__ :int = XCLIPVisionConfig(patch_size=snake_case, num_frames=snake_case )
if "large" in model_name:
__magic_name__ :Dict = 7_6_8
__magic_name__ :int = 3_0_7_2
__magic_name__ :List[Any] = 1_2
__magic_name__ :str = 1_0_2_4
__magic_name__ :Any = 4_0_9_6
__magic_name__ :Optional[Any] = 1_6
__magic_name__ :Union[str, Any] = 2_4
__magic_name__ :Union[str, Any] = 7_6_8
__magic_name__ :Tuple = 3_0_7_2
if model_name == "xclip-large-patch14-16-frames":
__magic_name__ :List[str] = 3_3_6
__magic_name__ :Any = XCLIPConfig.from_text_vision_configs(snake_case, snake_case )
if "large" in model_name:
__magic_name__ :str = 7_6_8
return config
def __lowercase ( snake_case ):
"""simple docstring"""
if name == "token_embedding.weight":
__magic_name__ :Any = name.replace('''token_embedding.weight''', '''text_model.embeddings.token_embedding.weight''' )
if name == "positional_embedding":
__magic_name__ :Any = name.replace('''positional_embedding''', '''text_model.embeddings.position_embedding.weight''' )
if "ln_1" in name:
__magic_name__ :List[str] = name.replace('''ln_1''', '''layer_norm1''' )
if "ln_2" in name:
__magic_name__ :str = name.replace('''ln_2''', '''layer_norm2''' )
if "c_fc" in name:
__magic_name__ :List[Any] = name.replace('''c_fc''', '''fc1''' )
if "c_proj" in name:
__magic_name__ :Any = name.replace('''c_proj''', '''fc2''' )
if name.startswith('''transformer.resblocks''' ):
__magic_name__ :Any = name.replace('''transformer.resblocks''', '''text_model.encoder.layers''' )
if "attn.out_proj" in name and "message" not in name:
__magic_name__ :Union[str, Any] = name.replace('''attn.out_proj''', '''self_attn.out_proj''' )
if "ln_final" in name:
__magic_name__ :Tuple = name.replace('''ln_final''', '''text_model.final_layer_norm''' )
# visual encoder
if name == "visual.class_embedding":
__magic_name__ :List[Any] = name.replace('''visual.class_embedding''', '''vision_model.embeddings.class_embedding''' )
if name == "visual.positional_embedding":
__magic_name__ :Any = name.replace('''visual.positional_embedding''', '''vision_model.embeddings.position_embedding.weight''' )
if name.startswith('''visual.transformer.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''visual.transformer.resblocks''', '''vision_model.encoder.layers''' )
if "visual.conv1" in name:
__magic_name__ :Tuple = name.replace('''visual.conv1''', '''vision_model.embeddings.patch_embedding''' )
if "visual.ln_pre" in name:
__magic_name__ :Tuple = name.replace('''visual.ln_pre''', '''vision_model.pre_layernorm''' )
if "visual.ln_post" in name:
__magic_name__ :Optional[Any] = name.replace('''visual.ln_post''', '''vision_model.post_layernorm''' )
if "visual.proj" in name:
__magic_name__ :Tuple = name.replace('''visual.proj''', '''visual_projection.weight''' )
if "text_projection" in name:
__magic_name__ :int = name.replace('''text_projection''', '''text_projection.weight''' )
# prompt-related projection and layer-norm layers on top
if "prompts_visual_proj" in name:
__magic_name__ :int = name.replace('''prompts_visual_proj''', '''prompts_visual_projection''' )
if "prompts_visual_ln" in name:
__magic_name__ :Dict = name.replace('''prompts_visual_ln''', '''prompts_visual_layernorm''' )
# MIT (multi-frame integration transformer)
if name == "mit.positional_embedding":
__magic_name__ :List[Any] = name.replace('''positional''', '''position''' )
if name.startswith('''mit.resblocks''' ):
__magic_name__ :Union[str, Any] = name.replace('''mit.resblocks''', '''mit.encoder.layers''' )
# prompts generator
if name.startswith('''prompts_generator.norm''' ):
__magic_name__ :str = name.replace('''prompts_generator.norm''', '''prompts_generator.layernorm''' )
return name
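# Illustrative mapping (not exhaustive): rename_key applied to
# 'visual.transformer.resblocks.0.ln_1.weight' yields
# 'vision_model.encoder.layers.0.layer_norm1.weight' -- 'ln_1' is rewritten
# first, then the 'visual.transformer.resblocks' prefix.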
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ :Any = orig_state_dict.pop(snake_case )
if "attn.in_proj" in key:
__magic_name__ :str = key.split('''.''' )
if key.startswith('''visual''' ):
__magic_name__ :List[Any] = key_split[3]
__magic_name__ :List[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__magic_name__ :List[Any] = val[
:dim, :
]
__magic_name__ :List[str] = val[
dim : dim * 2, :
]
__magic_name__ :List[str] = val[
-dim:, :
]
else:
__magic_name__ :str = val[
:dim
]
__magic_name__ :Optional[int] = val[
dim : dim * 2
]
__magic_name__ :Any = val[
-dim:
]
else:
if "weight" in key:
__magic_name__ :int = val[
:dim, :
]
__magic_name__ :Union[str, Any] = val[
dim : dim * 2, :
]
__magic_name__ :List[Any] = val[
-dim:, :
]
else:
__magic_name__ :Union[str, Any] = val[:dim]
__magic_name__ :str = val[
dim : dim * 2
]
__magic_name__ :Dict = val[-dim:]
elif key.startswith('''mit''' ):
__magic_name__ :List[Any] = key_split[2]
__magic_name__ :Any = config.vision_config.mit_hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Optional[int] = val[dim : dim * 2, :]
__magic_name__ :int = val[-dim:, :]
else:
__magic_name__ :Tuple = val[:dim]
__magic_name__ :Optional[int] = val[dim : dim * 2]
__magic_name__ :Optional[int] = val[-dim:]
else:
__magic_name__ :Any = key_split[2]
__magic_name__ :List[Any] = config.text_config.hidden_size
if "weight" in key:
__magic_name__ :Union[str, Any] = val[:dim, :]
__magic_name__ :Tuple = val[
dim : dim * 2, :
]
__magic_name__ :str = val[-dim:, :]
else:
__magic_name__ :int = val[:dim]
__magic_name__ :Any = val[
dim : dim * 2
]
__magic_name__ :str = val[-dim:]
else:
__magic_name__ :Tuple = rename_key(snake_case )
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__magic_name__ :List[Any] = val.T
__magic_name__ :Optional[Any] = val
return orig_state_dict
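# Note (illustrative): the branches above split each fused 'attn.in_proj' tensor
# of shape (3 * dim, ...) into three equal chunks -- rows [:dim], [dim : 2 * dim]
# and [-dim:] become the separate q/k/v projection parameters expected by the
# Hugging Face implementation.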
def __lowercase ( snake_case ):
"""simple docstring"""
if num_frames == 8:
__magic_name__ :Any = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 1_6:
__magic_name__ :List[Any] = '''eating_spaghetti.npy'''
elif num_frames == 3_2:
__magic_name__ :Tuple = '''eating_spaghetti_32_frames.npy'''
__magic_name__ :str = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''', filename=snake_case, repo_type='''dataset''', )
__magic_name__ :List[Any] = np.load(snake_case )
return list(snake_case )
def __lowercase ( snake_case, snake_case=None, snake_case=False ):
"""simple docstring"""
__magic_name__ :Union[str, Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__magic_name__ :Optional[int] = model_to_url[model_name]
__magic_name__ :List[str] = 8
if "16-frames" in model_name:
__magic_name__ :List[Any] = 1_6
elif "shot" in model_name:
__magic_name__ :Dict = 3_2
__magic_name__ :str = get_xclip_config(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
model.eval()
if "drive" in checkpoint_url:
__magic_name__ :Any = '''pytorch_model.bin'''
gdown.cached_download(snake_case, snake_case, quiet=snake_case )
__magic_name__ :Optional[Any] = torch.load(snake_case, map_location='''cpu''' )['''model''']
else:
__magic_name__ :Optional[int] = torch.hub.load_state_dict_from_url(snake_case )['''model''']
__magic_name__ :List[str] = convert_state_dict(snake_case, snake_case )
__magic_name__ :List[Any] = XCLIPModel(snake_case )
__magic_name__ , __magic_name__ :Optional[Any] = model.load_state_dict(snake_case, strict=snake_case )
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__magic_name__ :str = 3_3_6 if model_name == '''xclip-large-patch14-16-frames''' else 2_2_4
__magic_name__ :Optional[int] = VideoMAEImageProcessor(size=snake_case )
__magic_name__ :Optional[int] = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Tuple = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' )
__magic_name__ :Optional[int] = XCLIPProcessor(image_processor=snake_case, tokenizer=snake_case )
__magic_name__ :List[Any] = prepare_video(snake_case )
__magic_name__ :str = processor(
text=['''playing sports''', '''eating spaghetti''', '''go shopping'''], videos=snake_case, return_tensors='''pt''', padding=snake_case )
print('''Shape of pixel values:''', inputs.pixel_values.shape )
with torch.no_grad():
__magic_name__ :Tuple = model(**snake_case )
# Verify outputs
__magic_name__ :Any = outputs.logits_per_video
__magic_name__ :str = logits_per_video.softmax(dim=1 )
print('''Probs:''', snake_case )
# kinetics-400
if model_name == "xclip-base-patch32":
__magic_name__ :Dict = torch.tensor([[0.0019, 0.9951, 0.0030]] )
elif model_name == "xclip-base-patch32-16-frames":
__magic_name__ :str = torch.tensor([[7.0_9_9_9E-0_4, 9.9_8_8_3E-0_1, 4.5_5_8_0E-0_4]] )
elif model_name == "xclip-base-patch16":
__magic_name__ :Tuple = torch.tensor([[0.0083, 0.9681, 0.0236]] )
elif model_name == "xclip-base-patch16-16-frames":
__magic_name__ :Tuple = torch.tensor([[7.6_9_3_7E-0_4, 9.9_7_2_8E-0_1, 1.9_4_7_3E-0_3]] )
elif model_name == "xclip-large-patch14":
__magic_name__ :str = torch.tensor([[0.0062, 0.9864, 0.0075]] )
elif model_name == "xclip-large-patch14-16-frames":
__magic_name__ :Optional[int] = torch.tensor([[3.3_8_7_7E-0_4, 9.9_9_3_7E-0_1, 2.8_8_8_8E-0_4]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__magic_name__ :Optional[int] = torch.tensor([[0.0555, 0.8914, 0.0531]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__magic_name__ :List[str] = torch.tensor([[3.8_5_5_4E-0_4, 9.9_9_2_9E-0_1, 3.2_7_5_4E-0_4]] )
elif model_name == "xclip-large-patch14-kinetics-600":
__magic_name__ :List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__magic_name__ :Tuple = torch.tensor([[7.1_8_9_0E-0_6, 9.9_9_9_4E-0_1, 5.6_5_5_9E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__magic_name__ :List[str] = torch.tensor([[1.0_3_2_0E-0_5, 9.9_9_9_3E-0_1, 6.2_4_3_5E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_7_7E-0_6, 9.9_9_9_0E-0_1, 9.8_3_8_6E-0_5]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__magic_name__ :Optional[int] = torch.tensor([[4.1_3_4_7E-0_5, 9.9_9_6_2E-0_1, 3.3_4_1_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
__magic_name__ :Union[str, Any] = torch.tensor([[8.5_8_5_7E-0_5, 9.9_9_2_8E-0_1, 6.3_2_9_1E-0_4]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
__magic_name__ :Optional[int] = torch.tensor([[0.0027, 0.9904, 0.0070]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
__magic_name__ :Any = torch.tensor([[9.8_2_1_9E-0_4, 9.9_5_9_3E-0_1, 3.0_8_6_3E-0_3]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__magic_name__ :Optional[int] = torch.tensor([[3.5_0_8_2E-0_4, 9.9_7_8_5E-0_1, 1.7_9_6_6E-0_3]] )
else:
raise ValueError(f'''Model name {model_name} not supported''' )
assert torch.allclose(snake_case, snake_case, atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case )
if push_to_hub:
print('''Pushing model, processor and slow tokenizer files to the hub...''' )
model.push_to_hub(snake_case, organization='''nielsr''' )
processor.push_to_hub(snake_case, organization='''nielsr''' )
slow_tokenizer.push_to_hub(snake_case, organization='''nielsr''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""xclip-base-patch32""",
type=str,
help="""Name of the model.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
SCREAMING_SNAKE_CASE__ : List[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 0
| 0
|
from math import sqrt
def _lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = 0
for i in range(1 , int(sqrt(SCREAMING_SNAKE_CASE ) + 1 ) ):
if n % i == 0 and i != sqrt(SCREAMING_SNAKE_CASE ):
total += i + n // i
elif i == sqrt(SCREAMING_SNAKE_CASE ):
total += i
return total - n
def _lowerCamelCase ( SCREAMING_SNAKE_CASE = 10000 ):
'''simple docstring'''
A_ = sum(
i
for i in range(1 , SCREAMING_SNAKE_CASE )
if sum_of_divisors(sum_of_divisors(SCREAMING_SNAKE_CASE ) ) == i and sum_of_divisors(SCREAMING_SNAKE_CASE ) != i )
return total
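# Classic sanity check (illustrative): 220 and 284 form an amicable pair --
# sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220 -- so both are
# counted by the sum above, while perfect numbers (sum_of_divisors(n) == n)
# are excluded by the inequality in the condition.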
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 709
|
from __future__ import annotations
import pandas as pd
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = [0] * no_of_processes
A_ = [0] * no_of_processes
# Copy the burst time into remaining_time[]
for i in range(SCREAMING_SNAKE_CASE ):
A_ = burst_time[i]
A_ = 0
A_ = 0
A_ = 999999999
A_ = 0
A_ = False
# Process until all processes are completed
while complete != no_of_processes:
for j in range(SCREAMING_SNAKE_CASE ):
if arrival_time[j] <= increment_time and remaining_time[j] > 0:
if remaining_time[j] < minm:
A_ = remaining_time[j]
A_ = j
A_ = True
if not check:
increment_time += 1
continue
remaining_time[short] -= 1
A_ = remaining_time[short]
if minm == 0:
A_ = 999999999
if remaining_time[short] == 0:
complete += 1
A_ = False
# Find finish time of current process
A_ = increment_time + 1
# Calculate waiting time
A_ = finish_time - arrival_time[short]
A_ = finar - burst_time[short]
if waiting_time[short] < 0:
A_ = 0
# Increment time
increment_time += 1
return waiting_time
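# Worked example (illustrative): arrival_time = [0, 1], burst_time = [4, 2].
# P2 preempts P1 at t=1 (remaining 2 < 3) and finishes at t=3, so its waiting
# time is 3 - 1 - 2 = 0; P1 then resumes and finishes at t=6, waiting
# 6 - 0 - 4 = 2. The function returns [2, 0].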
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = [0] * no_of_processes
for i in range(SCREAMING_SNAKE_CASE ):
A_ = burst_time[i] + waiting_time[i]
return turn_around_time
def _lowerCamelCase ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A_ = 0
A_ = 0
for i in range(SCREAMING_SNAKE_CASE ):
A_ = total_waiting_time + waiting_time[i]
A_ = total_turn_around_time + turn_around_time[i]
print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}" )
print('''Average turn around time =''' , total_turn_around_time / no_of_processes )
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
__lowercase = int(input())
__lowercase = [0] * no_of_processes
__lowercase = [0] * no_of_processes
__lowercase = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
__lowercase , __lowercase = map(int, input().split())
__lowercase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__lowercase = burst_time
__lowercase = no_of_processes
__lowercase = waiting_time
__lowercase = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
__lowercase = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs)
| 563
| 0
|
'''simple docstring'''
def lowercase_ ( __A : int ) -> int:
"""simple docstring"""
assert (
isinstance(__A , __A ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
lowercase , lowercase : int =1, 1
for _ in range(number_of_steps - 1 ):
lowercase , lowercase : Optional[int] =current + previous, current
return current
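# Illustrative values (assuming one or two steps at a time): the counts are the
# Fibonacci numbers -- 1 step -> 1 way, 2 -> 2, 3 -> 3, 4 -> 5, 5 -> 8.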
if __name__ == "__main__":
import doctest
doctest.testmod()
| 94
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class A :
def __init__( self, UpperCamelCase__, UpperCamelCase__=13, UpperCamelCase__=10, UpperCamelCase__=3, UpperCamelCase__=2, UpperCamelCase__=2, UpperCamelCase__=2, UpperCamelCase__=True, UpperCamelCase__=True, UpperCamelCase__=32, UpperCamelCase__=5, UpperCamelCase__=4, UpperCamelCase__=37, UpperCamelCase__="gelu", UpperCamelCase__=0.1, UpperCamelCase__=0.1, UpperCamelCase__=10, UpperCamelCase__=0.02, UpperCamelCase__=0.9, UpperCamelCase__=None, ):
"""simple docstring"""
lowerCAmelCase_ = parent
lowerCAmelCase_ = batch_size
lowerCAmelCase_ = image_size
lowerCAmelCase_ = num_channels
lowerCAmelCase_ = patch_size
lowerCAmelCase_ = tubelet_size
lowerCAmelCase_ = num_frames
lowerCAmelCase_ = is_training
lowerCAmelCase_ = use_labels
lowerCAmelCase_ = hidden_size
lowerCAmelCase_ = num_hidden_layers
lowerCAmelCase_ = num_attention_heads
lowerCAmelCase_ = intermediate_size
lowerCAmelCase_ = hidden_act
lowerCAmelCase_ = hidden_dropout_prob
lowerCAmelCase_ = attention_probs_dropout_prob
lowerCAmelCase_ = type_sequence_label_size
lowerCAmelCase_ = initializer_range
lowerCAmelCase_ = mask_ratio
lowerCAmelCase_ = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
lowerCAmelCase_ = (image_size // patch_size) ** 2
lowerCAmelCase_ = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
lowerCAmelCase_ = int(mask_ratio * self.seq_length )
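# e.g. with the defaults above: (10 // 2) ** 2 = 25 patches per frame,
# (2 // 2) * 25 = 25 tokens in total, and int(0.9 * 25) = 22 masked positions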
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ = None
if self.use_labels:
lowerCAmelCase_ = ids_tensor([self.batch_size], self.type_sequence_label_size )
lowerCAmelCase_ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return VideoMAEConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, tubelet_size=self.tubelet_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=UpperCamelCase__, initializer_range=self.initializer_range, )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
lowerCAmelCase_ = model(UpperCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForPreTraining(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase_ = torch.ones((self.num_masks,) )
lowerCAmelCase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] )
lowerCAmelCase_ = mask.expand(self.batch_size, -1 ).bool()
lowerCAmelCase_ = model(UpperCamelCase__, UpperCamelCase__ )
# model only returns predictions for masked patches
lowerCAmelCase_ = mask.sum().item()
lowerCAmelCase_ = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.prepare_config_and_inputs()
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = config_and_inputs
lowerCAmelCase_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class A ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__snake_case = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
__snake_case = (
{'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
__snake_case = False
__snake_case = False
__snake_case = False
__snake_case = False
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEModelTester(self )
lowerCAmelCase_ = ConfigTester(self, config_class=UpperCamelCase__, has_text_modality=UpperCamelCase__, hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__=False ):
"""simple docstring"""
lowerCAmelCase_ = copy.deepcopy(UpperCamelCase__ )
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
lowerCAmelCase_ = torch.ones((self.model_tester.num_masks,) )
lowerCAmelCase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] )
lowerCAmelCase_ = mask.expand(self.model_tester.batch_size, -1 ).bool()
lowerCAmelCase_ = bool_masked_pos.to(UpperCamelCase__ )
if return_labels:
if model_class in [
*get_values(UpperCamelCase__ ),
]:
lowerCAmelCase_ = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=UpperCamelCase__ )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''VideoMAE does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings(), (nn.Module) )
lowerCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__, nn.Linear ) )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = model_class(UpperCamelCase__ )
lowerCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict, so arg_names order is deterministic
lowerCAmelCase_ = [*signature.parameters.keys()]
lowerCAmelCase_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1], UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*UpperCamelCase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ = VideoMAEModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
if not self.has_attentions:
pass
else:
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ = True
for model_class in self.all_model_classes:
lowerCAmelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase_ = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
lowerCAmelCase_ = True
lowerCAmelCase_ = False
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
# check that output_attentions also works using config
del inputs_dict["output_attentions"]
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
lowerCAmelCase_ = len(UpperCamelCase__ )
# Check that attention outputs are always last and in the expected order
lowerCAmelCase_ = True
lowerCAmelCase_ = True
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
self.assertEqual(out_len + 1, len(UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ), self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, seq_len, seq_len], )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ ):
lowerCAmelCase_ = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
lowerCAmelCase_ = model(**self._prepare_for_class(UpperCamelCase__, UpperCamelCase__ ) )
lowerCAmelCase_ = outputs.hidden_states
lowerCAmelCase_ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(UpperCamelCase__ ), UpperCamelCase__ )
lowerCAmelCase_ = self.model_tester.seq_length - self.model_tester.num_masks
lowerCAmelCase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
lowerCAmelCase_ , lowerCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
# check that output_hidden_states also works using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase_ = True
check_hidden_states_output(UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
pass
def __UpperCamelCase ( ):
lowerCAmelCase_ = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
lowerCAmelCase_ = np.load(_A )
return list(_A )
@require_torch
@require_vision
class A ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to(
UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_video()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor([0.3_669, -0.0_688, -0.2_421] ).to(UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], UpperCamelCase__, atol=1E-4 ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self ):
"""simple docstring"""
lowerCAmelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(UpperCamelCase__ )
lowerCAmelCase_ = self.default_image_processor
lowerCAmelCase_ = prepare_video()
lowerCAmelCase_ = image_processor(UpperCamelCase__, return_tensors='''pt''' ).to(UpperCamelCase__ )
# add boolean mask, indicating which patches to mask
lowerCAmelCase_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''', filename='''bool_masked_pos.pt''' )
lowerCAmelCase_ = torch.load(UpperCamelCase__ )
# forward pass
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
# verify the logits
lowerCAmelCase_ = torch.Size([1, 1408, 1536] )
lowerCAmelCase_ = torch.tensor(
[[0.7_994, 0.9_612, 0.8_508], [0.7_401, 0.8_958, 0.8_302], [0.5_862, 0.7_468, 0.7_325]], device=UpperCamelCase__ )
self.assertEqual(outputs.logits.shape, UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], UpperCamelCase__, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `True`)
lowerCAmelCase_ = torch.tensor([0.5_142], device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss, UpperCamelCase__, atol=1E-4 ) )
# verify the loss (`config.norm_pix_loss` = `False`)
lowerCAmelCase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''', norm_pix_loss=UpperCamelCase__ ).to(
UpperCamelCase__ )
with torch.no_grad():
lowerCAmelCase_ = model(**UpperCamelCase__ )
lowerCAmelCase_ = torch.tensor(torch.tensor([0.6_469] ), device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.loss, UpperCamelCase__, atol=1E-4 ) )
| 431
| 0
|
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a__: Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self,__lowerCamelCase,__lowerCamelCase = None,__lowerCamelCase = None ):
super().__init__()
A__ = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
A__ = torch.zeros(__lowerCamelCase,__lowerCamelCase )
else:
A__ = None
A__ = torch.nn.Parameter(__lowerCamelCase )
class VQDiffusionPipeline(DiffusionPipeline ):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler
    def __init__( self, vqvae, text_encoder, tokenizer, transformer, scheduler, learned_classifier_free_sampling_embeddings, ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae, transformer=transformer, text_encoder=text_encoder, tokenizer=tokenizer, scheduler=scheduler, learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings, )
    def _encode_prompt( self, prompt, num_images_per_prompt, do_classifier_free_guidance ):
        batch_size = len(prompt ) if isinstance(prompt, list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding='''max_length''', max_length=self.tokenizer.model_max_length, return_tensors='''pt''', )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
            logger.warning(
                '''The following part of your input was truncated because CLIP can only handle sequences up to'''
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device ) )[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True )
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0 )
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0 ).repeat(batch_size, 1, 1 )
            else:
                uncond_tokens = [''''''] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding='''max_length''', max_length=max_length, truncation=True, return_tensors='''pt''', )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1 )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
        return prompt_embeds
@torch.no_grad()
    def __call__( self, prompt, num_inference_steps = 100, guidance_scale = 5.0, truncation_rate = 1.0, num_images_per_prompt = 1, generator = None, latents = None, output_type = "pil", return_dict = True, callback = None, callback_steps = 1, ):
        if isinstance(prompt, str ):
            batch_size = 1
        elif isinstance(prompt, list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
        batch_size = batch_size * num_images_per_prompt
        # `guidance_scale > 1` enables classifier-free guidance; `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance )
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int ) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps )}." )
        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class ).to(self.device )
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    '''Unexpected latents value(s). All latents must be valid embedding indices i.e. in the range 0,'''
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)." )
            latents = latents.to(self.device )
        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device )
        timesteps_tensor = self.scheduler.timesteps.to(self.device )
        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t ).sample
            if do_classifier_free_guidance:
                model_output_uncond , model_output_text = model_output.chunk(2 )
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True )
            model_output = self.truncate(model_output, truncation_rate )
            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-70 )
            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator ).prev_sample
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample )
        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape )
        image = self.vqvae.decode(embeddings, force_not_quantize=True ).sample
        image = (image / 2 + 0.5).clamp(0, 1 )
        image = image.cpu().permute(0, 2, 3, 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
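    # Minimal usage sketch (illustrative, not part of this file; the checkpoint
    # name is an assumption):
    #     pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
    #     image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]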
    def truncate( self, log_p_x_0, truncation_rate ):
        # Keep only the most likely tokens whose cumulative probability is below
        # `truncation_rate`; everything else is set to log(0) = -inf.
        sorted_log_p_x_0 , indices = torch.sort(log_p_x_0, 1, descending=True )
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0 )
        keep_mask = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True )
        keep_mask = torch.cat((all_true, keep_mask), dim=1 )
        keep_mask = keep_mask[:, :-1, :]
        # undo the sort so the mask lines up with the original token order
        keep_mask = keep_mask.gather(1, indices.argsort(1 ) )
        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf # -inf = log(0)
        return rv
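    # Worked toy example for `truncate` (a sketch): with per-pixel probabilities
    # (0.7, 0.2, 0.1) and truncation_rate=0.8, the sorted cumulative sums are
    # (0.7, 0.9, 1.0). After the one-position shift that always keeps the most
    # likely token, the 0.7 and 0.2 entries survive and the 0.1 entry becomes -inf.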
| 212
|
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
def __init__( self,__lowerCamelCase,__lowerCamelCase=13,__lowerCamelCase=30,__lowerCamelCase=2,__lowerCamelCase=3,__lowerCamelCase=True,__lowerCamelCase=True,__lowerCamelCase=32,__lowerCamelCase=5,__lowerCamelCase=4,__lowerCamelCase=37,__lowerCamelCase="gelu",__lowerCamelCase=0.1,__lowerCamelCase=0.1,__lowerCamelCase=10,__lowerCamelCase=0.02,__lowerCamelCase=None,__lowerCamelCase=2,):
A__ = parent
A__ = batch_size
A__ = image_size
A__ = patch_size
A__ = num_channels
A__ = is_training
A__ = use_labels
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = intermediate_size
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = type_sequence_label_size
A__ = initializer_range
A__ = scope
A__ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
A__ = (image_size // patch_size) ** 2
A__ = num_patches + 1
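        # e.g. with the defaults above (image_size=30, patch_size=2):
        # (30 // 2) ** 2 = 225 patches, so seq_length = 226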
def UpperCamelCase ( self ):
A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A__ = None
if self.use_labels:
A__ = ids_tensor([self.batch_size],self.type_sequence_label_size )
A__ = self.get_config()
return config, pixel_values, labels
def UpperCamelCase ( self ):
return ViTConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=__lowerCamelCase,initializer_range=self.initializer_range,encoder_stride=self.encoder_stride,)
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = ViTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = ViTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
A__ = 1
A__ = ViTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape,(self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase ( self,__lowerCamelCase,__lowerCamelCase,__lowerCamelCase ):
A__ = self.type_sequence_label_size
A__ = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = model(__lowerCamelCase,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
A__ = 1
A__ = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A__ = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape,(self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
__SCREAMING_SNAKE_CASE = (
{'''feature-extraction''': ViTModel, '''image-classification''': ViTForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self ):
A__ = ViTModelTester(self )
A__ = ConfigTester(self,config_class=__lowerCamelCase,has_text_modality=__lowerCamelCase,hidden_size=37 )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def UpperCamelCase ( self ):
pass
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings(),(nn.Module) )
A__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase,nn.Linear ) )
def UpperCamelCase ( self ):
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCamelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ['''pixel_values''']
self.assertListEqual(arg_names[:1],__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def UpperCamelCase ( self ):
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def UpperCamelCase ( self ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A__ = ViTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
    return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
@cached_property
def UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def UpperCamelCase ( self ):
A__ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(__lowerCamelCase )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A__ = model(**__lowerCamelCase )
# verify the logits
A__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape,__lowerCamelCase )
A__ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3],__lowerCamelCase,atol=1E-4 ) )
@slow
def UpperCamelCase ( self ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
A__ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(__lowerCamelCase )
A__ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''',size=480 )
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
A__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass
with torch.no_grad():
A__ = model(__lowerCamelCase,interpolate_pos_encoding=__lowerCamelCase )
# verify the logits
A__ = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape,__lowerCamelCase )
A__ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3],__lowerCamelCase,atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase ( self ):
        A__ = ViTModel.from_pretrained('''facebook/dino-vits8''',torch_dtype=torch.float16,device_map='''auto''' )
A__ = self.default_image_processor
A__ = prepare_img()
A__ = image_processor(images=__lowerCamelCase,return_tensors='''pt''' )
A__ = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
A__ = model(__lowerCamelCase )
| 212
| 1
|
"""simple docstring"""
from maths.prime_factors import prime_factors
def liouville_lambda( number ) -> int:
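    """
    The Liouville lambda function: returns 1 if `number` has an even number of
    prime factors and -1 otherwise (this assumes `prime_factors` counts factors
    with multiplicity). The doctests below are illustrative additions:
    >>> liouville_lambda(10)
    1
    >>> liouville_lambda(11)
    -1
    """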
    if not isinstance(number , int ):
        _snake_case = f"""Input value of [number={number}] must be an integer"""
        raise TypeError(_snake_case )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
| 103
|
"""simple docstring"""
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f""".{module_name}""" , '''transformers.models''' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '''__name__''' , None ) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('''transformers''' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
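# For example, a lookup such as image_processor_class_from_name("ViTImageProcessor")
# imports transformers.models.vit and returns its ViTImageProcessor attribute
# (an illustration of the mapping above, not code from this file).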
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
if resolved_config_file is None:
logger.info(
'''Could not locate the image processor configuration file, will try to use the model config instead.''' )
return {}
    with open(resolved_config_file , encoding='''utf-8''' ) as reader:
        return json.load(reader )
class AutoImageProcessor:
def __init__( self : Any ):
"""simple docstring"""
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(__lowerCamelCase )
    def from_pretrained( cls , pretrained_model_name_or_path , **kwargs ):
"""simple docstring"""
_snake_case = kwargs.pop('''config''' , __lowerCamelCase )
_snake_case = kwargs.pop('''trust_remote_code''' , __lowerCamelCase )
_snake_case = True
_snake_case , _snake_case = ImageProcessingMixin.get_image_processor_dict(__lowerCamelCase , **__lowerCamelCase )
_snake_case = config_dict.get('''image_processor_type''' , __lowerCamelCase )
_snake_case = None
if "AutoImageProcessor" in config_dict.get('''auto_map''' , {} ):
_snake_case = config_dict['''auto_map''']['''AutoImageProcessor''']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_snake_case = config_dict.pop('''feature_extractor_type''' , __lowerCamelCase )
if feature_extractor_class is not None:
logger.warning(
'''Could not find image processor class in the image processor config or the model config. Loading'''
''' based on pattern matching with the model\'s feature extractor configuration.''' )
_snake_case = feature_extractor_class.replace('''FeatureExtractor''' , '''ImageProcessor''' )
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
_snake_case = config_dict['''auto_map''']['''AutoFeatureExtractor''']
_snake_case = feature_extractor_auto_map.replace('''FeatureExtractor''' , '''ImageProcessor''' )
logger.warning(
'''Could not find image processor auto map in the image processor config or the model config.'''
''' Loading based on pattern matching with the model\'s feature extractor configuration.''' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = AutoConfig.from_pretrained(__lowerCamelCase , **__lowerCamelCase )
# It could be in `config.image_processor_type``
_snake_case = getattr(__lowerCamelCase , '''image_processor_type''' , __lowerCamelCase )
if hasattr(__lowerCamelCase , '''auto_map''' ) and "AutoImageProcessor" in config.auto_map:
_snake_case = config.auto_map['''AutoImageProcessor''']
if image_processor_class is not None:
_snake_case = image_processor_class_from_name(__lowerCamelCase )
_snake_case = image_processor_auto_map is not None
_snake_case = image_processor_class is not None or type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING
_snake_case = resolve_trust_remote_code(
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
if has_remote_code and trust_remote_code:
_snake_case = get_class_from_dynamic_module(
__lowerCamelCase , __lowerCamelCase , **__lowerCamelCase )
_snake_case = kwargs.pop('''code_revision''' , __lowerCamelCase )
if os.path.isdir(__lowerCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(__lowerCamelCase ) in IMAGE_PROCESSOR_MAPPING:
_snake_case = IMAGE_PROCESSOR_MAPPING[type(__lowerCamelCase )]
return image_processor_class.from_dict(__lowerCamelCase , **__lowerCamelCase )
raise ValueError(
f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register( config_class , image_processor_class ):
        """simple docstring"""
        IMAGE_PROCESSOR_MAPPING.register(config_class , image_processor_class )
| 103
| 1
|
from __future__ import annotations
import collections
import pprint
from pathlib import Path
def signature( word: str ) -> str:
    return "".join(sorted(word ) )
def anagram( my_word: str ) -> list[str]:
    return word_by_signature[signature(my_word )]
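# Illustrative values (a sketch; the anagram results depend on words.txt):
#   signature("python") -> "hnopty"
#   anagram("post")     -> all words in the list whose sorted letters are "opst"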
data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})
word_by_signature = collections.defaultdict(list)
for word in word_list:
word_by_signature[signature(word)].append(word)
if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
with open('anagrams.txt', 'w') as file:
file.write('all_anagrams = \n ')
file.write(pprint.pformat(all_anagrams))
| 719
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def __lowerCamelCase ( ) -> int:
__UpperCamelCase : int = cn.convert_to_negative(__lowerCAmelCase )
# assert negative_img array for at least one True
assert negative_img.any()
def __lowerCamelCase ( ) -> Optional[Any]:
with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img:
# Work around assertion for response
assert str(cc.change_contrast(__lowerCAmelCase , 110 ) ).startswith(
"""<PIL.Image.Image image mode=RGB size=100x100 at""" )
def __lowerCamelCase ( ) -> Dict:
__UpperCamelCase : str = canny.gen_gaussian_kernel(9 , sigma=1.4 )
# Assert ambiguous array
assert resp.all()
def __lowerCamelCase ( ) -> str:
__UpperCamelCase : List[Any] = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 )
# assert ambiguous array for all == True
assert canny_img.all()
__UpperCamelCase : Optional[int] = canny.canny(__lowerCAmelCase )
# assert canny array for at least one True
assert canny_array.any()
def __lowerCamelCase ( ) -> Optional[int]:
assert gg.gaussian_filter(__lowerCAmelCase , 5 , sigma=0.9 ).all()
def __lowerCamelCase ( ) -> Tuple:
# laplace diagonals
__UpperCamelCase : List[str] = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] )
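    # note: the kernel entries sum to zero (4 * 0.25 + 4 * 0.5 - 3 = 0), as
    # expected for a Laplacian-style high-pass filter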
__UpperCamelCase : Any = conv.img_convolve(__lowerCAmelCase , __lowerCAmelCase ).astype(__lowerCAmelCase )
assert res.any()
def __lowerCamelCase ( ) -> List[str]:
assert med.median_filter(__lowerCAmelCase , 3 ).any()
def __lowerCamelCase ( ) -> int:
__UpperCamelCase , __UpperCamelCase : List[Any] = sob.sobel_filter(__lowerCAmelCase )
assert grad.any() and theta.any()
def __lowerCamelCase ( ) -> Optional[int]:
__UpperCamelCase : int = sp.make_sepia(__lowerCAmelCase , 20 )
assert sepia.all()
def __lowerCamelCase ( __lowerCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" ) -> Union[str, Any]:
__UpperCamelCase : str = bs.Burkes(imread(__lowerCAmelCase , 1 ) , 120 )
burkes.process()
assert burkes.output_img.any()
def __lowerCamelCase ( __lowerCAmelCase : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> str:
__UpperCamelCase : Dict = rs.NearestNeighbour(imread(__lowerCAmelCase , 1 ) , 400 , 200 )
nn.process()
assert nn.output.any()
def __lowerCamelCase ( ) -> Union[str, Any]:
__UpperCamelCase : Any = """digital_image_processing/image_data/lena.jpg"""
# Reading the image and converting it to grayscale.
__UpperCamelCase : int = imread(__lowerCAmelCase , 0 )
# Test for get_neighbors_pixel function() return not None
__UpperCamelCase : Dict = 0
__UpperCamelCase : Optional[Any] = 0
__UpperCamelCase : str = image[x_coordinate][y_coordinate]
__UpperCamelCase : Tuple = lbp.get_neighbors_pixel(
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
assert neighbors_pixels is not None
# Test for local_binary_pattern function()
# Create a numpy array as the same height and width of read image
__UpperCamelCase : List[Any] = np.zeros((image.shape[0], image.shape[1]) )
# Iterating through the image and calculating the local binary pattern value
# for each pixel.
for i in range(0 , image.shape[0] ):
for j in range(0 , image.shape[1] ):
__UpperCamelCase : str = lbp.local_binary_value(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
assert lbp_image.any()
| 515
| 0
|
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object ):
    """simple docstring"""
def __init__( self : Any , lowerCamelCase : Tuple , lowerCamelCase : Optional[int]=13 , lowerCamelCase : str=7 , lowerCamelCase : Any=True , lowerCamelCase : Tuple=True , lowerCamelCase : int=True , lowerCamelCase : int=True , lowerCamelCase : Dict=True , lowerCamelCase : Tuple=False , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : int=2 , lowerCamelCase : Optional[int]=99 , lowerCamelCase : List[Any]=0 , lowerCamelCase : Any=32 , lowerCamelCase : Optional[Any]=5 , lowerCamelCase : int=4 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Tuple=512 , lowerCamelCase : Optional[Any]=12 , lowerCamelCase : Optional[int]=2 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Union[str, Any]=3 , lowerCamelCase : List[str]=4 , lowerCamelCase : Any="last" , lowerCamelCase : Optional[int]=None , lowerCamelCase : Optional[Any]=None , ) -> Dict:
__snake_case : Any = parent
__snake_case : Any = batch_size
__snake_case : Optional[Any] = seq_length
__snake_case : Optional[int] = is_training
__snake_case : List[Any] = use_input_lengths
__snake_case : Optional[Any] = use_token_type_ids
__snake_case : str = use_labels
__snake_case : Union[str, Any] = gelu_activation
__snake_case : Optional[int] = sinusoidal_embeddings
__snake_case : Optional[int] = causal
__snake_case : List[str] = asm
__snake_case : Optional[int] = n_langs
__snake_case : Tuple = vocab_size
__snake_case : Optional[int] = n_special
__snake_case : Any = hidden_size
__snake_case : List[Any] = num_hidden_layers
__snake_case : List[str] = num_attention_heads
__snake_case : Any = hidden_dropout_prob
__snake_case : Dict = attention_probs_dropout_prob
__snake_case : Optional[Any] = max_position_embeddings
__snake_case : Union[str, Any] = type_vocab_size
__snake_case : List[str] = type_sequence_label_size
__snake_case : List[str] = initializer_range
__snake_case : Optional[Any] = num_labels
__snake_case : List[str] = num_choices
__snake_case : Optional[Any] = summary_type
__snake_case : int = use_proj
__snake_case : Optional[Any] = scope
def __snake_case ( self : Any ) -> List[Any]:
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case : int = None
if self.use_input_lengths:
__snake_case : Any = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__snake_case : Any = None
if self.use_token_type_ids:
__snake_case : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__snake_case : str = None
__snake_case : Optional[Any] = None
__snake_case : List[str] = None
if self.use_labels:
__snake_case : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case : Tuple = ids_tensor([self.batch_size] , 2 ).float()
__snake_case : Any = ids_tensor([self.batch_size] , self.num_choices )
__snake_case : List[str] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __snake_case ( self : Optional[Any] ) -> Dict:
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __snake_case ( self : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : str , lowerCamelCase : Dict , lowerCamelCase : Optional[Any] , ) -> Dict:
__snake_case : List[str] = FlaubertModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : str = model(lowerCamelCase , lengths=lowerCamelCase , langs=lowerCamelCase )
__snake_case : List[Any] = model(lowerCamelCase , langs=lowerCamelCase )
__snake_case : Tuple = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self : List[str] , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , ) -> Optional[Any]:
__snake_case : Optional[Any] = FlaubertWithLMHeadModel(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Dict = model(lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __snake_case ( self : Dict , lowerCamelCase : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : List[str] , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : Dict , lowerCamelCase : List[Any] , ) -> List[str]:
__snake_case : Union[str, Any] = FlaubertForQuestionAnsweringSimple(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Optional[int] = model(lowerCamelCase )
__snake_case : Optional[int] = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __snake_case ( self : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Tuple , lowerCamelCase : Dict , ) -> Union[str, Any]:
        model = FlaubertForQuestionAnswering(lowerCamelCase )
        model.to(lowerCamelCase )
        model.eval()
        result = model(lowerCamelCase )
        result_with_labels = model(
            lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , p_mask=lowerCamelCase , )
        result_with_labels = model(
            lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase , cls_index=lowerCamelCase , is_impossible=lowerCamelCase , )
        (total_loss , ) = result_with_labels.to_tuple()
        result_with_labels = model(lowerCamelCase , start_positions=lowerCamelCase , end_positions=lowerCamelCase )
        (total_loss , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __snake_case ( self : Union[str, Any] , lowerCamelCase : int , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : int , lowerCamelCase : List[str] , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any] , ) -> str:
__snake_case : Union[str, Any] = FlaubertForSequenceClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Dict = model(lowerCamelCase )
__snake_case : List[Any] = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __snake_case ( self : Any , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , ) -> str:
__snake_case : Any = self.num_labels
__snake_case : Union[str, Any] = FlaubertForTokenClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : int = model(lowerCamelCase , attention_mask=lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __snake_case ( self : Optional[Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , ) -> List[str]:
__snake_case : List[str] = self.num_choices
__snake_case : Any = FlaubertForMultipleChoice(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__snake_case : Any = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case : Union[str, Any] = model(
lowerCamelCase , attention_mask=lowerCamelCase , token_type_ids=lowerCamelCase , labels=lowerCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __snake_case ( self : Tuple ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase : Dict = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCAmelCase : Optional[Any] = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __snake_case ( self : List[str] , lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : List[str] , lowerCamelCase : int , lowerCamelCase : Union[str, Any] ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __snake_case ( self : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str]=False ) -> List[str]:
__snake_case : Optional[Any] = super()._prepare_for_class(lowerCamelCase , lowerCamelCase , return_labels=lowerCamelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__snake_case : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
__snake_case : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase )
return inputs_dict
def __snake_case ( self : Tuple ) -> int:
__snake_case : int = FlaubertModelTester(self )
__snake_case : Union[str, Any] = ConfigTester(self , config_class=lowerCamelCase , emb_dim=37 )
def __snake_case ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __snake_case ( self : Union[str, Any] ) -> Optional[int]:
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*lowerCamelCase )
def __snake_case ( self : Any ) -> str:
__snake_case : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*lowerCamelCase )
def __snake_case ( self : Any ) -> Union[str, Any]:
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*lowerCamelCase )
def __snake_case ( self : List[str] ) -> Optional[int]:
__snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*lowerCamelCase )
def __snake_case ( self : int ) -> Tuple:
__snake_case : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*lowerCamelCase )
def __snake_case ( self : int ) -> Tuple:
__snake_case : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*lowerCamelCase )
def __snake_case ( self : Union[str, Any] ) -> int:
__snake_case : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*lowerCamelCase )
@slow
def __snake_case ( self : Any ) -> str:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case : Dict = FlaubertModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
@slow
@require_torch_gpu
def __snake_case ( self : str ) -> int:
__snake_case , __snake_case : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__snake_case : str = True
__snake_case : List[Any] = model_class(config=lowerCamelCase )
__snake_case : Union[str, Any] = self._prepare_for_class(lowerCamelCase , lowerCamelCase )
__snake_case : str = torch.jit.trace(
lowerCamelCase , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(lowerCamelCase , os.path.join(lowerCamelCase , "traced_model.pt" ) )
__snake_case : Union[str, Any] = torch.jit.load(os.path.join(lowerCamelCase , "traced_model.pt" ) , map_location=lowerCamelCase )
loaded(inputs_dict["input_ids"].to(lowerCamelCase ) , inputs_dict["attention_mask"].to(lowerCamelCase ) )
@require_torch
class a (unittest.TestCase ):
"""simple docstring"""
@slow
def __snake_case ( self : str ) -> List[Any]:
__snake_case : int = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased" )
__snake_case : Dict = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
__snake_case : List[str] = model(lowerCamelCase )[0]
__snake_case : Any = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , lowerCamelCase )
__snake_case : Dict = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCamelCase , atol=1E-4 ) )
| 81
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser( subparsers=None ):
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''test''' )
    else:
        parser = argparse.ArgumentParser('''Accelerate test command''' )
    parser.add_argument(
        '''--config_file''' , default=None , help=(
            '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache '''
            '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '''
            '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '''
            '''with \'huggingface\'.'''
        ) , )
    if subparsers is not None:
        parser.set_defaults(func=test_command )
    return parser
def test_command( args ):
    '''simple docstring'''
    script_name = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ['''test_utils''', '''scripts''', '''test_script.py'''] )
    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"""--config_file={args.config_file} {script_name}"""
    cmd = ['''accelerate-launch'''] + test_args.split()
    result = execute_subprocess_async(cmd , env=os.environ.copy() )
    if result.returncode == 0:
        print('''Test is a success! You are ready for your distributed training!''' )
def main():
    '''simple docstring'''
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args )
if __name__ == "__main__":
main()
| 256
| 0
|
'''simple docstring'''
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin ):
'''simple docstring'''
_lowercase = ['image_processor', 'tokenizer']
_lowercase = 'Pix2StructImageProcessor'
_lowercase = ('T5Tokenizer', 'T5TokenizerFast')
    def __init__( self , image_processor , tokenizer ):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor , tokenizer )
def __call__( self : int , lowerCamelCase : str=None , lowerCamelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowerCamelCase : bool = True , lowerCamelCase : Union[bool, str, PaddingStrategy] = False , lowerCamelCase : Union[bool, str, TruncationStrategy] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[int] = 2_048 , lowerCamelCase : int = 0 , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[bool] = None , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = False , lowerCamelCase : bool = True , lowerCamelCase : Optional[Union[str, TensorType]] = None , **lowerCamelCase : Union[str, Any] , )-> BatchEncoding:
if images is None and text is None:
raise ValueError("""You have to specify either images or text.""" )
# Get only text
if images is None and not self.image_processor.is_vqa:
snake_case__ : Dict = self.tokenizer
            text_encoding = self.tokenizer(
                text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
            encoding_image_processor = self.image_processor(
                lowerCamelCase , return_tensors=lowerCamelCase , max_patches=lowerCamelCase , **lowerCamelCase )
else:
# add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                lowerCamelCase , return_tensors=lowerCamelCase , max_patches=lowerCamelCase , header_text=lowerCamelCase , **lowerCamelCase )
if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=lowerCamelCase , add_special_tokens=lowerCamelCase , padding=lowerCamelCase , truncation=lowerCamelCase , max_length=lowerCamelCase , stride=lowerCamelCase , pad_to_multiple_of=lowerCamelCase , return_attention_mask=lowerCamelCase , return_overflowing_tokens=lowerCamelCase , return_special_tokens_mask=lowerCamelCase , return_offsets_mapping=lowerCamelCase , return_token_type_ids=lowerCamelCase , return_length=lowerCamelCase , verbose=lowerCamelCase , return_tensors=lowerCamelCase , **lowerCamelCase , )
            if "attention_mask" in text_encoding:
                text_encoding['''decoder_attention_mask'''] = text_encoding.pop("""attention_mask""" )
            if "input_ids" in text_encoding:
                text_encoding['''decoder_input_ids'''] = text_encoding.pop("""input_ids""" )
        else:
            text_encoding = None
if text_encoding is not None:
            encoding_image_processor.update(text_encoding )
return encoding_image_processor
def __lowerCAmelCase ( self : Optional[Any] , *lowerCamelCase : Dict , **lowerCamelCase : Optional[Any] )-> List[Any]:
return self.tokenizer.batch_decode(*lowerCamelCase , **lowerCamelCase )
def __lowerCAmelCase ( self : str , *lowerCamelCase : Tuple , **lowerCamelCase : List[Any] )-> Dict:
return self.tokenizer.decode(*lowerCamelCase , **lowerCamelCase )
@property
def __lowerCAmelCase ( self : Optional[int] )-> Union[str, Any]:
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
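    # Minimal usage sketch (illustrative; the checkpoint name is an assumption):
    #     processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
    #     inputs = processor(images=image, text="A picture of", return_tensors="pt")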
| 172
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class BlenderbotSmallConfig(PretrainedConfig ):
'''simple docstring'''
_lowercase = 'blenderbot-small'
_lowercase = ['past_key_values']
_lowercase = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self : Optional[int] , lowerCamelCase : Any=50_265 , lowerCamelCase : str=512 , lowerCamelCase : List[Any]=8 , lowerCamelCase : List[Any]=2_048 , lowerCamelCase : str=16 , lowerCamelCase : Any=8 , lowerCamelCase : Dict=2_048 , lowerCamelCase : int=16 , lowerCamelCase : Optional[int]=0.0 , lowerCamelCase : Any=0.0 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Dict="gelu" , lowerCamelCase : List[str]=512 , lowerCamelCase : List[Any]=0.1 , lowerCamelCase : Tuple=0.0 , lowerCamelCase : str=0.0 , lowerCamelCase : List[str]=0.02 , lowerCamelCase : List[str]=1 , lowerCamelCase : List[str]=False , lowerCamelCase : List[str]=0 , lowerCamelCase : List[str]=1 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Tuple=2 , **lowerCamelCase : Tuple , )-> Tuple:
snake_case__ : List[str] = vocab_size
snake_case__ : Dict = max_position_embeddings
snake_case__ : Union[str, Any] = d_model
snake_case__ : Any = encoder_ffn_dim
snake_case__ : Dict = encoder_layers
snake_case__ : Optional[Any] = encoder_attention_heads
snake_case__ : Dict = decoder_ffn_dim
snake_case__ : Dict = decoder_layers
snake_case__ : Dict = decoder_attention_heads
snake_case__ : Tuple = dropout
snake_case__ : Union[str, Any] = attention_dropout
snake_case__ : Union[str, Any] = activation_dropout
snake_case__ : str = activation_function
snake_case__ : Optional[Any] = init_std
snake_case__ : Tuple = encoder_layerdrop
snake_case__ : Optional[Any] = decoder_layerdrop
snake_case__ : List[str] = use_cache
snake_case__ : List[Any] = encoder_layers
snake_case__ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , is_encoder_decoder=lowerCamelCase , decoder_start_token_id=lowerCamelCase , forced_eos_token_id=lowerCamelCase , **lowerCamelCase , )
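# Illustrative sketch (added; not part of the original file). Assuming only the
# standard `PretrainedConfig` behaviour, the class above can be exercised like
# this; `attribute_map` makes the generic names below resolve to the
# BlenderbotSmall-specific ones.
def _demo_blenderbot_small_config():
    config = BlenderbotSmallConfig(encoder_layers=4, decoder_layers=4)
    assert config.hidden_size == config.d_model == 512
    assert config.num_attention_heads == config.encoder_attention_heads == 16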
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )

            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
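# Illustrative sketch (added; not part of the original file). Assuming the
# standard `transformers.onnx` API, the config above produces the dummy inputs
# used for tracing; with dynamic axes (-1) the batch is pinned to
# `OnnxConfig.default_fixed_batch` (2) to avoid ONNX constant folding.
def _demo_generate_dummy_inputs():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    onnx_config = BlenderbotSmallOnnxConfig(BlenderbotSmallConfig(), task="default")
    dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)
    assert dummy_inputs["input_ids"].shape[0] == 2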
from __future__ import annotations
END = "#"


class Trie:
    """A simple prefix trie supporting word insertion and prefix lookup."""

    def __init__(self) -> None:
        self._trie: dict = {}

    def insert_word(self, text: str) -> None:
        trie = self._trie
        for char in text:
            if char not in trie:
                trie[char] = {}
            trie = trie[char]
        trie[END] = True

    def find_word(self, prefix: str) -> tuple | list:
        trie = self._trie
        for char in prefix:
            if char in trie:
                trie = trie[char]
            else:
                return []
        return self._elements(trie)

    def _elements(self, d: dict) -> tuple:
        result = []
        for c, v in d.items():
            sub_result = [" "] if c == END else [(c + s) for s in self._elements(v)]
            result.extend(sub_result)
        return tuple(result)


trie = Trie()
words = ("depart", "detergent", "daring", "dog", "deer", "deal")
for word in words:
    trie.insert_word(word)


def autocomplete_using_trie(string: str) -> tuple:
    """Return every word in the trie that starts with ``string``."""
    suffixes = trie.find_word(string)
    return tuple(string + word for word in suffixes)


def main() -> None:
    print(autocomplete_using_trie("de"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
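# Illustrative check (added; not part of the original file). With the word list
# above, the prefix "de" expands to four words, each carrying the trailing
# space that ``_elements`` emits for a terminal node:
#
#   >>> sorted(autocomplete_using_trie("de"))
#   ['deal ', 'deer ', 'depart ', 'detergent ']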
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_blip''': [
'''BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlipConfig''',
'''BlipTextConfig''',
'''BlipVisionConfig''',
],
'''processing_blip''': ['''BlipProcessor'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_blip"] = ["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip"] = [
'''BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlipModel''',
'''BlipPreTrainedModel''',
'''BlipForConditionalGeneration''',
'''BlipForQuestionAnswering''',
'''BlipVisionModel''',
'''BlipTextModel''',
'''BlipForImageTextRetrieval''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blip"] = [
'''TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFBlipModel''',
'''TFBlipPreTrainedModel''',
'''TFBlipForConditionalGeneration''',
'''TFBlipForQuestionAnswering''',
'''TFBlipVisionModel''',
'''TFBlipTextModel''',
'''TFBlipForImageTextRetrieval''',
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
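# Illustrative note (added; not part of the original file). Because of the
# `_LazyModule` indirection above, submodules are only imported on first
# attribute access, e.g.:
#
#   from transformers.models.blip import BlipProcessor   # triggers processing_blip import
#
# Nothing heavy (torch, TF) is imported just by importing the package itself.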
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))

        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
a__ : Optional[int] = [["11:14", "to", "11:39", "a.m", "11:39", "to", "11:44", "a.m.", "11:44", "a.m.", "to", "12:25", "p.m.", "12:25", "to", "12:58", "p.m.", "12:58", "to", "4:00", "p.m.", "2:00", "to", "5:00", "p.m.", "Coffee", "Break", "Coffee", "will", "be", "served", "for", "men", "and", "women", "in", "the", "lobby", "adjacent", "to", "exhibit", "area.", "Please", "move", "into", "exhibit", "area.", "(Exhibits", "Open)", "TRRF", "GENERAL", "SESSION", "(PART", "|)", "Presiding:", "Lee", "A.", "Waller", "TRRF", "Vice", "President", "“Introductory", "Remarks”", "Lee", "A.", "Waller,", "TRRF", "Vice", "Presi-", "dent", "Individual", "Interviews", "with", "TRRF", "Public", "Board", "Members", "and", "Sci-", "entific", "Advisory", "Council", "Mem-", "bers", "Conducted", "by", "TRRF", "Treasurer", "Philip", "G.", "Kuehn", "to", "get", "answers", "which", "the", "public", "refrigerated", "warehousing", "industry", "is", "looking", "for.", "Plus", "questions", "from", "the", "floor.", "Dr.", "Emil", "M.", "Mrak,", "University", "of", "Cal-", "ifornia,", "Chairman,", "TRRF", "Board;", "Sam", "R.", "Cecil,", "University", "of", "Georgia", "College", "of", "Agriculture;", "Dr.", "Stanley", "Charm,", "Tufts", "University", "School", "of", "Medicine;", "Dr.", "Robert", "H.", "Cotton,", "ITT", "Continental", "Baking", "Company;", "Dr.", "Owen", "Fennema,", "University", "of", "Wis-", "consin;", "Dr.", "Robert", "E.", "Hardenburg,", "USDA.", "Questions", "and", "Answers", "Exhibits", "Open", "Capt.", "Jack", "Stoney", "Room", "TRRF", "Scientific", "Advisory", "Council", "Meeting", "Ballroom", "Foyer"]] # noqa: E231
a__ : Dict = [[[141, 57, 214, 69], [228, 58, 252, 69], [141, 75, 216, 88], [230, 79, 280, 88], [142, 260, 218, 273], [230, 261, 255, 273], [143, 279, 218, 290], [231, 282, 290, 291], [143, 342, 218, 354], [231, 345, 289, 355], [202, 362, 227, 373], [143, 379, 220, 392], [231, 382, 291, 394], [144, 714, 220, 726], [231, 715, 256, 726], [144, 732, 220, 745], [232, 736, 291, 747], [144, 769, 218, 782], [231, 770, 256, 782], [141, 788, 202, 801], [215, 791, 274, 804], [143, 826, 204, 838], [215, 826, 240, 838], [142, 844, 202, 857], [215, 847, 274, 859], [334, 57, 427, 69], [440, 57, 522, 69], [369, 75, 461, 88], [469, 75, 516, 88], [528, 76, 562, 88], [570, 76, 667, 88], [675, 75, 711, 87], [721, 79, 778, 88], [789, 75, 840, 88], [369, 97, 470, 107], [484, 94, 507, 106], [518, 94, 562, 107], [576, 94, 655, 110], [668, 94, 792, 109], [804, 95, 829, 107], [369, 113, 465, 125], [477, 116, 547, 125], [562, 113, 658, 125], [671, 116, 748, 125], [761, 113, 811, 125], [369, 131, 465, 143], [477, 133, 548, 143], [563, 130, 698, 145], [710, 130, 802, 146], [336, 171, 412, 183], [423, 171, 572, 183], [582, 170, 716, 184], [728, 171, 817, 187], [829, 171, 844, 186], [338, 197, 482, 212], [507, 196, 557, 209], [569, 196, 595, 208], [610, 196, 702, 209], [505, 214, 583, 226], [595, 214, 656, 227], [670, 215, 807, 227], [335, 259, 543, 274], [556, 259, 708, 272], [372, 279, 422, 291], [435, 279, 460, 291], [474, 279, 574, 292], [587, 278, 664, 291], [676, 278, 738, 291], [751, 279, 834, 291], [372, 298, 434, 310], [335, 341, 483, 354], [497, 341, 655, 354], [667, 341, 728, 354], [740, 341, 825, 354], [335, 360, 430, 372], [442, 360, 534, 372], [545, 359, 687, 372], [697, 360, 754, 372], [765, 360, 823, 373], [334, 378, 428, 391], [440, 378, 577, 394], [590, 378, 705, 391], [720, 378, 801, 391], [334, 397, 400, 409], [370, 416, 529, 429], [544, 416, 576, 432], [587, 416, 665, 428], [677, 416, 814, 429], [372, 435, 452, 450], [465, 434, 495, 447], [511, 434, 600, 447], [611, 436, 637, 447], [649, 436, 694, 451], [705, 438, 824, 447], [369, 453, 452, 466], [464, 454, 509, 466], [522, 453, 611, 469], [625, 453, 792, 469], [370, 472, 556, 488], [570, 472, 684, 487], [697, 472, 718, 485], [732, 472, 835, 488], [369, 490, 411, 503], [425, 490, 484, 503], [496, 490, 635, 506], [645, 490, 707, 503], [718, 491, 761, 503], [771, 490, 840, 503], [336, 510, 374, 521], [388, 510, 447, 522], [460, 510, 489, 521], [503, 510, 580, 522], [592, 509, 736, 525], [745, 509, 770, 522], [781, 509, 840, 522], [338, 528, 434, 541], [448, 528, 596, 541], [609, 527, 687, 540], [700, 528, 792, 541], [336, 546, 397, 559], [407, 546, 431, 559], [443, 546, 525, 560], [537, 546, 680, 562], [688, 546, 714, 559], [722, 546, 837, 562], [336, 565, 449, 581], [461, 565, 485, 577], [497, 565, 665, 581], [681, 565, 718, 577], [732, 565, 837, 580], [337, 584, 438, 597], [452, 583, 521, 596], [535, 584, 677, 599], [690, 583, 787, 596], [801, 583, 825, 596], [338, 602, 478, 615], [492, 602, 530, 614], [543, 602, 638, 615], [650, 602, 676, 614], [688, 602, 788, 615], [802, 602, 843, 614], [337, 621, 502, 633], [516, 621, 615, 637], [629, 621, 774, 636], [789, 621, 827, 633], [337, 639, 418, 652], [432, 640, 571, 653], [587, 639, 731, 655], [743, 639, 769, 652], [780, 639, 841, 652], [338, 658, 440, 673], [455, 658, 491, 670], [508, 658, 602, 671], [616, 658, 638, 670], [654, 658, 835, 674], [337, 677, 429, 689], [337, 714, 482, 726], [495, 714, 548, 726], [561, 714, 683, 726], [338, 770, 461, 782], [474, 769, 554, 785], [489, 788, 562, 803], [576, 
788, 643, 801], [656, 787, 751, 804], [764, 788, 844, 801], [334, 825, 421, 838], [430, 824, 574, 838], [584, 824, 723, 841], [335, 844, 450, 857], [464, 843, 583, 860], [628, 862, 755, 875], [769, 861, 848, 878]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __UpperCAmelCase )
self.assertListEqual(encoding.boxes , __UpperCAmelCase )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset() -> Dataset:
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformer2DModel
from .resnet_flax import FlaxDownsample2D, FlaxResnetBlock2D, FlaxUpsample2D


class FlaxCrossAttnDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_downsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            # only the first resnet takes `in_channels`; the rest keep `out_channels`
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxDownBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    add_downsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            in_channels = self.in_channels if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=in_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_downsample:
            self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, temb, deterministic=True):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            output_states += (hidden_states,)

        if self.add_downsample:
            hidden_states = self.downsamplers_0(hidden_states)
            output_states += (hidden_states,)

        return hidden_states, output_states


class FlaxCrossAttnUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    add_upsample: bool = True
    use_linear_projection: bool = False
    only_cross_attention: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []
        attentions = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

            attn_block = FlaxTransformer2DModel(
                in_channels=self.out_channels,
                n_heads=self.num_attention_heads,
                d_head=self.out_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                only_cross_attention=self.only_cross_attention,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

        self.resnets = resnets
        self.attentions = attentions

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUpBlock2D(nn.Module):
    in_channels: int
    out_channels: int
    prev_output_channel: int
    dropout: float = 0.0
    num_layers: int = 1
    add_upsample: bool = True
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        resnets = []

        for i in range(self.num_layers):
            res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels
            resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels

            res_block = FlaxResnetBlock2D(
                in_channels=resnet_in_channels + res_skip_channels,
                out_channels=self.out_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets

        if self.add_upsample:
            self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)

    def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)

            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        if self.add_upsample:
            hidden_states = self.upsamplers_0(hidden_states)

        return hidden_states


class FlaxUNetMidBlock2DCrossAttn(nn.Module):
    in_channels: int
    dropout: float = 0.0
    num_layers: int = 1
    num_attention_heads: int = 1
    use_linear_projection: bool = False
    use_memory_efficient_attention: bool = False
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # there is always at least one resnet
        resnets = [
            FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
        ]

        attentions = []

        for _ in range(self.num_layers):
            attn_block = FlaxTransformer2DModel(
                in_channels=self.in_channels,
                n_heads=self.num_attention_heads,
                d_head=self.in_channels // self.num_attention_heads,
                depth=1,
                use_linear_projection=self.use_linear_projection,
                use_memory_efficient_attention=self.use_memory_efficient_attention,
                dtype=self.dtype,
            )
            attentions.append(attn_block)

            res_block = FlaxResnetBlock2D(
                in_channels=self.in_channels,
                out_channels=self.in_channels,
                dropout_prob=self.dropout,
                dtype=self.dtype,
            )
            resnets.append(res_block)

        self.resnets = resnets
        self.attentions = attentions

    def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)
            hidden_states = resnet(hidden_states, temb, deterministic=deterministic)

        return hidden_states
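# Illustrative sketch (added; not part of the original file). Assuming the
# standard Flax module API, a down block is initialized and applied like any
# other `nn.Module`; activations are NHWC and `temb` is the time embedding.
def _demo_down_block():
    import jax

    block = FlaxDownBlock2D(in_channels=32, out_channels=64, num_layers=1)
    hidden_states = jnp.zeros((1, 16, 16, 32))
    temb = jnp.zeros((1, 128))
    params = block.init(jax.random.PRNGKey(0), hidden_states, temb)
    out, skips = block.apply(params, hidden_states, temb)
    return out, skips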
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
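# Illustrative note (added; not part of the original file): the hard-coded ids
# above correspond to the commented sentence and can be reproduced with the
# matching tokenizer, assuming the standard `transformers` API:
#
#   from transformers import XLMRobertaTokenizer
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok("The dog is cute and lives in the garden house")["input_ids"]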
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor(DeformableDetrImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use DeformableDetrImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
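# Illustrative sketch (added; not part of the original file): the shim above
# keeps old imports working while steering users to the new class. The warning
# can be observed with the standard library alone:
def _demo_deprecation_warning():
    import warnings as _warnings

    with _warnings.catch_warnings(record=True) as caught:
        _warnings.simplefilter("always")
        DeformableDetrFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)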
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        is_training=True,
        use_auxiliary_loss=False,
        num_queries=10,
        num_channels=3,
        min_size=32 * 4,
        max_size=32 * 6,
        num_labels=4,
        mask_feature_size=32,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.mask_feature_size = mask_feature_size

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
            torch_device
        )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size], device=torch_device)

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size], device=torch_device) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels), device=torch_device) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config(self):
        return MaskFormerConfig.from_backbone_and_decoder_configs(
            backbone_config=SwinConfig(
                depths=[1, 1, 1, 1],
            ),
            decoder_config=DetrConfig(
                decoder_ffn_dim=128,
                num_queries=self.num_queries,
                decoder_attention_heads=2,
                d_model=self.mask_feature_size,
            ),
            mask_feature_size=self.mask_feature_size,
            fpn_feature_size=self.mask_feature_size,
            num_channels=self.num_channels,
            num_labels=self.num_labels,
        )

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
        return config, inputs_dict

    def check_output_hidden_state(self, output, config):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        self.parent.assertTrue(len(encoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(pixel_decoder_hidden_states), len(config.backbone_config.depths))
        self.parent.assertTrue(len(transformer_decoder_hidden_states), config.decoder_config.decoder_layers)

    def create_and_check_maskformer_model(self, config, pixel_values, pixel_mask, output_hidden_states=False):
        with torch.no_grad():
            model = MaskFormerModel(config=config)
            model.to(torch_device)
            model.eval()

            output = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            output = model(pixel_values, output_hidden_states=True)
        # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
        # encoder and pixel decoder
        self.parent.assertEqual(
            output.transformer_decoder_last_hidden_state.shape,
            (self.batch_size, self.num_queries, self.mask_feature_size),
        )
        # let's ensure the other two hidden state exists
        self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
        self.parent.assertTrue(output.encoder_last_hidden_state is not None)

        if output_hidden_states:
            self.check_output_hidden_state(output, config)

    def create_and_check_maskformer_instance_segmentation_head_model(
        self, config, pixel_values, pixel_mask, mask_labels, class_labels
    ):
        model = MaskFormerForInstanceSegmentation(config=config)
        model.to(torch_device)
        model.eval()

        def comm_check_on_output(result):
            # let's still check that all the required stuff is there
            self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
            self.parent.assertTrue(result.encoder_last_hidden_state is not None)
            # okay, now we need to check the logits shape
            # due to the encoder compression, masks have a //4 spatial size
            self.parent.assertEqual(
                result.masks_queries_logits.shape,
                (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4),
            )
            # + 1 for null class
            self.parent.assertEqual(
                result.class_queries_logits.shape, (self.batch_size, self.num_queries, self.num_labels + 1)
            )

        with torch.no_grad():
            result = model(pixel_values=pixel_values, pixel_mask=pixel_mask)
            result = model(pixel_values)

            comm_check_on_output(result)

            result = model(
                pixel_values=pixel_values, pixel_mask=pixel_mask, mask_labels=mask_labels, class_labels=class_labels
            )

            comm_check_on_output(result)

        self.parent.assertTrue(result.loss is not None)
        self.parent.assertEqual(result.loss.shape, torch.Size([1]))
@require_torch
class MaskFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
        if is_torch_available()
        else {}
    )

    is_encoder_decoder = False
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False

    def setUp(self):
        self.model_tester = MaskFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_maskformer_model(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=False)

    def test_maskformer_instance_segmentation_head_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*config_and_inputs)

    @unittest.skip(reason="MaskFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MaskFormer is not a generative model")
    def test_generate_without_input_ids(self):
        pass

    @unittest.skip(reason="MaskFormer does not use token embeddings")
    def test_resize_tokens_embeddings(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(
        reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`"
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ["facebook/maskformer-swin-small-coco"]:
            model = MaskFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_model_with_labels(self):
        size = (self.model_tester.min_size,) * 2
        inputs = {
            "pixel_values": torch.randn((2, 3, *size), device=torch_device),
            "mask_labels": torch.randn((2, 10, *size), device=torch_device),
            "class_labels": torch.zeros(2, 10, device=torch_device).long(),
        }

        model = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(torch_device)
        outputs = model(**inputs)
        self.assertTrue(outputs.loss is not None)

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_maskformer_model(config, **inputs_dict, output_hidden_states=True)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config).to(torch_device)
            outputs = model(**inputs_dict, output_attentions=True)
            self.assertTrue(outputs.attentions is not None)

    def test_training(self):
        if not self.model_tester.is_training:
            return
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()

        model = model_class(config)
        model.to(torch_device)
        model.train()

        loss = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels).loss
        loss.backward()

    def test_retain_grad_hidden_states_attentions(self):
        # only MaskFormerForInstanceSegmentation has the loss
        model_class = self.all_model_classes[1]
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.model_tester.prepare_config_and_inputs()
        config.output_hidden_states = True
        config.output_attentions = True

        model = model_class(config)
        model.to(torch_device)
        model.train()

        outputs = model(pixel_values, mask_labels=mask_labels, class_labels=class_labels)

        encoder_hidden_states = outputs.encoder_hidden_states[0]
        encoder_hidden_states.retain_grad()

        pixel_decoder_hidden_states = outputs.pixel_decoder_hidden_states[0]
        pixel_decoder_hidden_states.retain_grad()
        # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
        transformer_decoder_hidden_states = outputs.transformer_decoder_hidden_states[0]
        transformer_decoder_hidden_states.retain_grad()

        attentions = outputs.attentions[0]
        attentions.retain_grad()

        outputs.loss.backward(retain_graph=True)

        self.assertIsNotNone(encoder_hidden_states.grad)
        self.assertIsNotNone(pixel_decoder_hidden_states.grad)
        self.assertIsNotNone(transformer_decoder_hidden_states.grad)
        self.assertIsNotNone(attentions.grad)
TOLERANCE = 1e-4


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_vision
@slow
class MaskFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco")
            if is_vision_available()
            else None
        )

    def test_inference_no_head(self):
        model = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)

        expected_slice_hidden_state = torch.tensor(
            [[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.encoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

        expected_slice_hidden_state = torch.tensor(
            [[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]]
        ).to(torch_device)
        self.assertTrue(
            torch.allclose(
                outputs.transformer_decoder_last_hidden_state[0, :3, :3], expected_slice_hidden_state, atol=TOLERANCE
            )
        )

    def test_inference_instance_segmentation_head(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [
            [-1.3737124, -1.7724937, -1.9364233],
            [-1.5977281, -1.9867939, -2.1523695],
            [-1.5795398, -1.9269832, -2.093942],
        ]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [
                [1.6512e00, -5.2572e00, -3.3519e00],
                [3.6169e-02, -5.9025e00, -2.9313e00],
                [1.0766e-04, -7.7630e00, -5.1263e00],
            ]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_instance_segmentation_head_resnet_backbone(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(image, return_tensors="pt").to(torch_device)
        inputs_shape = inputs["pixel_values"].shape
        # check size is divisible by 32
        self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0)
        # check size
        self.assertEqual(inputs_shape, (1, 3, 800, 1088))

        with torch.no_grad():
            outputs = model(**inputs)
        # masks_queries_logits
        masks_queries_logits = outputs.masks_queries_logits
        self.assertEqual(
            masks_queries_logits.shape,
            (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4),
        )
        expected_slice = [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3], expected_slice, atol=TOLERANCE))
        # class_queries_logits
        class_queries_logits = outputs.class_queries_logits
        self.assertEqual(
            class_queries_logits.shape, (1, model.config.decoder_config.num_queries, model.config.num_labels + 1)
        )
        expected_slice = torch.tensor(
            [[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_slice, atol=TOLERANCE))

    def test_with_segmentation_maps_and_loss(self):
        model = (
            MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco")
            .to(torch_device)
            .eval()
        )
        image_processor = self.default_image_processor

        inputs = image_processor(
            [np.zeros((3, 800, 1333)), np.zeros((3, 800, 1333))],
            segmentation_maps=[np.zeros((384, 384)).astype(np.float32), np.zeros((384, 384)).astype(np.float32)],
            return_tensors="pt",
        )

        inputs["pixel_values"] = inputs["pixel_values"].to(torch_device)
        inputs["mask_labels"] = [el.to(torch_device) for el in inputs["mask_labels"]]
        inputs["class_labels"] = [el.to(torch_device) for el in inputs["class_labels"]]

        with torch.no_grad():
            outputs = model(**inputs)

        self.assertTrue(outputs.loss is not None)
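# Illustrative sketch (added; not part of the original file). Assuming the
# standard `transformers` API, the raw query logits checked above are usually
# turned into a per-pixel semantic map via the image processor:
def _demo_post_process(outputs, image_processor, image):
    semantic_map = image_processor.post_process_semantic_segmentation(
        outputs, target_sizes=[image.size[::-1]]
    )[0]
    return semantic_map  # (height, width) tensor of label ids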
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
UpperCamelCase = "\\n Text data.\n Second line of data."
UpperCamelCase = "file"
@pytest.fixture(scope="""session""" )
def A ( lowercase__ : List[str] ) -> Union[str, Any]:
UpperCamelCase__ :Optional[int] = tmp_path_factory.mktemp("""data""" ) / (FILE_PATH + """.zstd""")
UpperCamelCase__ :Optional[Any] = bytes(lowercase__ , """utf-8""" )
with zstd.open(lowercase__ , """wb""" ) as f:
f.write(lowercase__ )
return path
@pytest.fixture
def A ( lowercase__ : str ) -> int:
with open(os.path.join(tmpfs.local_root_dir , lowercase__ ) , """w""" ) as f:
f.write(lowercase__ )
return FILE_PATH
@pytest.mark.parametrize("compression_format", ["gzip", "xz", "zstd"])
def test_cached_path_extract(compression_format, gz_file, xz_file, zstd_path, tmp_path, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / "cache"
    download_config = DownloadConfig(cache_dir=cache_dir, extract_compressed_file=True)
    extracted_path = cached_path(input_path, download_config=download_config)
    with open(extracted_path) as f:
        extracted_file_content = f.read()
    with open(text_file) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize("default_extracted", [True, False])
@pytest.mark.parametrize("default_cache_dir", [True, False])
def test_extracted_datasets_path(default_extracted, default_cache_dir, xz_file, tmp_path, monkeypatch):
    custom_cache_dir = "custom_cache"
    custom_extracted_dir = "custom_extracted_dir"
    custom_extracted_path = tmp_path / "custom_extracted_path"
    if default_extracted:
        expected = ("downloads" if default_cache_dir else custom_cache_dir, "extracted")
    else:
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_DIR", custom_extracted_dir)
        monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(custom_extracted_path))
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True)
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=True)
    )
    extracted_file_path = cached_path(filename, download_config=download_config)
    assert Path(extracted_file_path).parent.parts[-2:] == expected
def test_cached_path_local(text_file):
    # absolute path
    text_file_abs = str(Path(text_file).resolve())
    assert cached_path(text_file_abs) == text_file
    # relative path
    text_file_rel = str(Path(text_file).resolve().relative_to(Path(os.getcwd())))
    assert cached_path(text_file_rel) == text_file


def test_cached_path_missing_local(tmp_path):
    # absolute path
    missing_file = str(tmp_path.resolve() / "__missing_file__.txt")
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)
    # relative path
    missing_file = "./__missing_file__.txt"
    with pytest.raises(FileNotFoundError):
        cached_path(missing_file)


def test_get_from_cache_fsspec(tmpfs_file):
    output_file = get_from_cache(f"tmp://{tmpfs_file}")
    with open(output_file) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_cached_path_offline():
    with pytest.raises(OfflineModeIsEnabled):
        cached_path("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_http_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        http_get("https://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        http_head("https://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_ftp_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        ftp_get("ftp://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        ftp_head("ftp://huggingface.co")


@patch("datasets.config.HF_DATASETS_OFFLINE", True)
def test_fsspec_offline(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.html"
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_get("s3://huggingface.co", temp_file=filename)
    with pytest.raises(OfflineModeIsEnabled):
        fsspec_head("s3://huggingface.co")
| 45
|
'''simple docstring'''
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
REPO_PATH = "."

if __name__ == "__main__":
    doctest_file_path = os.path.join(REPO_PATH, "utils/documentation_tests.txt")
    non_existent_paths = []
    all_paths = []
    with open(doctest_file_path) as fp:
        for line in fp:
            line = line.strip()
            path = os.path.join(REPO_PATH, line)
            if not (os.path.isfile(path) or os.path.isdir(path)):
                non_existent_paths.append(line)
            all_paths.append(path)
    if len(non_existent_paths) > 0:
        non_existent_paths = "\n".join(non_existent_paths)
        raise ValueError(f"`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}")
    if all_paths != sorted(all_paths):
        raise ValueError("Files in `utils/documentation_tests.txt` are not in alphabetical order.")
| 378
| 0
|
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = "\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n"
_DESCRIPTION = "\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper \"Evaluating Large Language Models Trained on Code\"\n(https://arxiv.org/abs/2107.03374).\n"
_KWARGS_DESCRIPTION = "\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric(\"code_eval\")\n    >>> test_cases = [\"assert add(2,3)==5\"]\n    >>> candidates = [[\"def add(a,b): return a*b\", \"def add(a, b): return a+b\"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {'pass@1': 0.5, 'pass@2': 1.0}\n"
_WARNING = "\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe \"code_eval\" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper \"Evaluating Large\nLanguage Models Trained on Code\" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL=\"1\". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ[\"HF_ALLOW_CODE_EVAL\"] = \"1\"\n\n################################################################################\\n"
_LICENSE = "The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE."
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''string''' ) ),
'''references''': datasets.Value('''string''' ),
} ) , homepage='''https://github.com/openai/human-eval''' , codebase_urls=['''https://github.com/openai/human-eval'''] , reference_urls=['''https://github.com/openai/human-eval'''] , license=_LICENSE , )
    def _compute(self, predictions, references, k=[1, 10, 100], num_workers=4, timeout=3.0):
        """Returns the scores"""

        if os.getenv("HF_ALLOW_CODE_EVAL", 0) != "1":
            raise ValueError(_WARNING)

        if os.name == "nt":
            raise NotImplementedError("This metric is currently not supported on Windows.")

        with ThreadPoolExecutor(max_workers=num_workers) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list)

            for task_id, (candidates, test_case) in enumerate(zip(predictions, references)):
                for candidate in candidates:
                    test_program = candidate + "\n" + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness, *args)
                    futures.append(future)
                    completion_id[task_id] += 1
                    n_samples += 1

            for future in as_completed(futures):
                result = future.result()
                results[result["task_id"]].append((result["completion_id"], result))

        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]["passed"] for r in result]
            total.append(len(passed))
            correct.append(sum(passed))
        total = np.array(total)
        correct = np.array(correct)

        ks = k
        pass_at_k = {f"pass@{k}": estimate_pass_at_k(total, correct, k).mean() for k in ks if (total >= k).all()}

        return pass_at_k, results
def estimate_pass_at_k(num_samples, num_correct, k):
    """Estimates pass@k of each problem and returns them in an array."""

    def estimator(n: int, c: int, k: int) -> float:
        """Calculates 1 - comb(n - c, k) / comb(n, k)."""
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

    if isinstance(num_samples, int):
        num_samples_it = itertools.repeat(num_samples, len(num_correct))
    else:
        assert len(num_samples) == len(num_correct)
        num_samples_it = iter(num_samples)

    return np.array([estimator(int(n), int(c), k) for n, c in zip(num_samples_it, num_correct)])
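# Worked example (sketch, added for illustration; not part of the original
# metric): with n = 2 samples for one task and c = 1 of them correct, the
# closed form gives pass@1 = 1 - 1/2 = 0.5 and pass@2 = 1.0, matching the
# docstring example above.
assert estimate_pass_at_k(np.array([2]), np.array([1]), 1).tolist() == [0.5]
assert estimate_pass_at_k(np.array([2]), np.array([1]), 2).tolist() == [1.0]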
| 720
|
from __future__ import annotations
def pigeon_sort(array: list[int]) -> list[int]:
    if len(array) == 0:
        return array
    _min, _max = min(array), max(array)
    # Compute the variables
    holes_range = _max - _min + 1
    holes, holes_repeat = [0] * holes_range, [0] * holes_range
    # Make the sorting.
    for i in array:
        index = i - _min
        holes[index] = i
        holes_repeat[index] += 1
    # Makes the array back by replacing the numbers.
    index = 0
    for i in range(holes_range):
        while holes_repeat[i] > 0:
            array[index] = holes[i]
            index += 1
            holes_repeat[i] -= 1
    # Returns the sorted array.
    return array
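# Worked example (sketch, added for illustration): pigeon sort mutates and
# returns the same list. It runs in O(n + (max - min)) time and space, so it
# only pays off when the value range is narrow.
assert pigeon_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]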
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n")
    unsorted = [int(x) for x in user_input.split(",")]
    print(pigeon_sort(unsorted))
| 583
| 0
|
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : jnp.ndarray
@flax_register_to_config
class _snake_case (nn.Module , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
__A : int =32
__A : int =4
__A : int =4
__A : Tuple[str] =(
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
__A : Tuple[str] =("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
__A : Union[bool, Tuple[bool]] =False
__A : Tuple[int] =(3_20, 6_40, 12_80, 12_80)
__A : int =2
__A : Union[int, Tuple[int]] =8
__A : Optional[Union[int, Tuple[int]]] =None
__A : int =12_80
__A : float =0.0
__A : bool =False
__A : jnp.dtype =jnp.floataa
__A : bool =True
__A : int =0
__A : bool =False
def UpperCamelCase__ ( self ,_snake_case ):
# init input tensors
UpperCAmelCase_ : List[str] = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCAmelCase_ : Tuple = jnp.zeros(_snake_case ,dtype=jnp.floataa )
UpperCAmelCase_ : Optional[Any] = jnp.ones((1,) ,dtype=jnp.intaa )
UpperCAmelCase_ : Optional[Any] = jnp.zeros((1, 1, self.cross_attention_dim) ,dtype=jnp.floataa )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = jax.random.split(_snake_case )
UpperCAmelCase_ : int = {"params": params_rng, "dropout": dropout_rng}
return self.init(_snake_case ,_snake_case ,_snake_case ,_snake_case )["params"]
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.block_out_channels
UpperCAmelCase_ : Tuple = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19." )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase_ : Union[str, Any] = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase_ : List[Any] = nn.Conv(
block_out_channels[0] ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
# time
UpperCAmelCase_ : Tuple = FlaxTimesteps(
block_out_channels[0] ,flip_sin_to_cos=self.flip_sin_to_cos ,freq_shift=self.config.freq_shift )
UpperCAmelCase_ : int = FlaxTimestepEmbedding(_snake_case ,dtype=self.dtype )
UpperCAmelCase_ : str = self.only_cross_attention
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Dict = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : List[str] = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCAmelCase_ : int = []
UpperCAmelCase_ : Optional[Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
UpperCAmelCase_ : Any = output_channel
UpperCAmelCase_ : List[Any] = block_out_channels[i]
UpperCAmelCase_ : str = i == len(_snake_case ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase_ : Optional[Any] = FlaxCrossAttnDownBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,dropout=self.dropout ,num_layers=self.layers_per_block ,num_attention_heads=num_attention_heads[i] ,add_downsample=not is_final_block ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
else:
UpperCAmelCase_ : Optional[int] = FlaxDownBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,dropout=self.dropout ,num_layers=self.layers_per_block ,add_downsample=not is_final_block ,dtype=self.dtype ,)
down_blocks.append(_snake_case )
UpperCAmelCase_ : Any = down_blocks
# mid
UpperCAmelCase_ : Union[str, Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] ,dropout=self.dropout ,num_attention_heads=num_attention_heads[-1] ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
# up
UpperCAmelCase_ : str = []
UpperCAmelCase_ : Union[str, Any] = list(reversed(_snake_case ) )
UpperCAmelCase_ : Union[str, Any] = list(reversed(_snake_case ) )
UpperCAmelCase_ : str = list(reversed(_snake_case ) )
UpperCAmelCase_ : Any = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
UpperCAmelCase_ : Tuple = output_channel
UpperCAmelCase_ : Any = reversed_block_out_channels[i]
UpperCAmelCase_ : List[Any] = reversed_block_out_channels[min(i + 1 ,len(_snake_case ) - 1 )]
UpperCAmelCase_ : Optional[Any] = i == len(_snake_case ) - 1
if up_block_type == "CrossAttnUpBlock2D":
UpperCAmelCase_ : Dict = FlaxCrossAttnUpBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,prev_output_channel=_snake_case ,num_layers=self.layers_per_block + 1 ,num_attention_heads=reversed_num_attention_heads[i] ,add_upsample=not is_final_block ,dropout=self.dropout ,use_linear_projection=self.use_linear_projection ,only_cross_attention=only_cross_attention[i] ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
else:
UpperCAmelCase_ : str = FlaxUpBlockaD(
in_channels=_snake_case ,out_channels=_snake_case ,prev_output_channel=_snake_case ,num_layers=self.layers_per_block + 1 ,add_upsample=not is_final_block ,dropout=self.dropout ,dtype=self.dtype ,)
up_blocks.append(_snake_case )
UpperCAmelCase_ : List[Any] = output_channel
UpperCAmelCase_ : List[str] = up_blocks
# out
UpperCAmelCase_ : Optional[int] = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
UpperCAmelCase_ : str = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self ,_snake_case ,_snake_case ,_snake_case ,_snake_case=None ,_snake_case=None ,_snake_case = True ,_snake_case = False ,):
# 1. time
if not isinstance(_snake_case ,jnp.ndarray ):
UpperCAmelCase_ : Optional[int] = jnp.array([timesteps] ,dtype=jnp.intaa )
elif isinstance(_snake_case ,jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ : List[str] = timesteps.astype(dtype=jnp.floataa )
UpperCAmelCase_ : Optional[int] = jnp.expand_dims(_snake_case ,0 )
UpperCAmelCase_ : Union[str, Any] = self.time_proj(_snake_case )
UpperCAmelCase_ : Optional[Any] = self.time_embedding(_snake_case )
# 2. pre-process
UpperCAmelCase_ : Optional[int] = jnp.transpose(_snake_case ,(0, 2, 3, 1) )
UpperCAmelCase_ : Union[str, Any] = self.conv_in(_snake_case )
# 3. down
UpperCAmelCase_ : int = (sample,)
for down_block in self.down_blocks:
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = down_block(_snake_case ,_snake_case ,_snake_case ,deterministic=not train )
else:
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = down_block(_snake_case ,_snake_case ,deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
UpperCAmelCase_ : int = ()
for down_block_res_sample, down_block_additional_residual in zip(
_snake_case ,_snake_case ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase_ : Optional[Any] = new_down_block_res_samples
# 4. mid
UpperCAmelCase_ : Dict = self.mid_block(_snake_case ,_snake_case ,_snake_case ,deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
UpperCAmelCase_ : List[str] = down_block_res_samples[-(self.layers_per_block + 1) :]
UpperCAmelCase_ : Tuple = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_snake_case ,_snake_case ):
UpperCAmelCase_ : Dict = up_block(
_snake_case ,temb=_snake_case ,encoder_hidden_states=_snake_case ,res_hidden_states_tuple=_snake_case ,deterministic=not train ,)
else:
UpperCAmelCase_ : Tuple = up_block(_snake_case ,temb=_snake_case ,res_hidden_states_tuple=_snake_case ,deterministic=not train )
# 6. post-process
UpperCAmelCase_ : Optional[int] = self.conv_norm_out(_snake_case )
UpperCAmelCase_ : List[Any] = nn.silu(_snake_case )
UpperCAmelCase_ : str = self.conv_out(_snake_case )
UpperCAmelCase_ : str = jnp.transpose(_snake_case ,(0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_snake_case )
| 71
|
'''simple docstring'''
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Divide a number of bytes into x partitions."""
    if partitions <= 0:
        raise ValueError("partitions must be a positive number!")
    if partitions > number_of_bytes:
        raise ValueError("partitions can not > number_of_bytes!")
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f"{start_bytes}-{end_bytes}")
    return allocation_list
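# Worked example (sketch, added for illustration): partitions split evenly and
# the last partition absorbs any remainder.
assert allocation_num(100, 4) == ["1-25", "26-50", "51-75", "76-100"]
assert allocation_num(10, 3) == ["1-3", "4-6", "7-10"]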
if __name__ == "__main__":
import doctest
doctest.testmod()
| 667
| 0
|
from __future__ import annotations
seive = [True] * 1000001
i = 2
while i * i <= 1000000:
    if seive[i]:
        for j in range(i * i, 1000001, i):
            seive[j] = False
    i += 1


def is_prime(n: int) -> bool:
    """Returns True if n is prime, according to the precomputed sieve."""
    return seive[n]


def contains_an_even_digit(n: int) -> bool:
    """Returns True if n contains an even digit."""
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1000000) -> list[int]:
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    return len(find_circular_primes())
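# Sanity check (sketch, added for illustration): the 13 circular primes below
# 100 are 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79 and 97, and the full run
# up to one million finds 55 of them (Project Euler problem 35).
assert len(find_circular_primes(100)) == 13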
if __name__ == "__main__":
print(f'''{len(find_circular_primes()) = }''')
| 720
|
from typing import Dict, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
__UpperCAmelCase = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]


def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str] = None):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    tesseract_config = tesseract_config if tesseract_config is not None else ""

    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
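# Worked example (sketch, added for illustration): a box covering the left
# half of a 200x100 image is mapped into the 0-1000 coordinate system that
# LayoutLM-style models expect.
assert normalize_box([0, 0, 100, 100], 200, 100) == [0, 0, 500, 1000]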
class A__ ( A ):
"""simple docstring"""
_lowercase : Union[str, Any] = ['''pixel_values''']
def __init__( self : Tuple , A_ : bool = True , A_ : Dict[str, int] = None , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : bool = True , A_ : Optional[str] = None , A_ : Optional[str] = "" , **A_ : Dict , ):
'''simple docstring'''
super().__init__(**A_ )
_lowerCAmelCase : int = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
_lowerCAmelCase : Any = get_size_dict(A_ )
_lowerCAmelCase : Any = do_resize
_lowerCAmelCase : Any = size
_lowerCAmelCase : Dict = resample
_lowerCAmelCase : Optional[int] = apply_ocr
_lowerCAmelCase : Dict = ocr_lang
_lowerCAmelCase : Optional[int] = tesseract_config
def __magic_name__ ( self : Dict , A_ : np.ndarray , A_ : Dict[str, int] , A_ : PILImageResampling = PILImageResampling.BILINEAR , A_ : Optional[Union[str, ChannelDimension]] = None , **A_ : List[str] , ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' )
_lowerCAmelCase : Dict = (size["height"], size["width"])
return resize(A_ , size=A_ , resample=A_ , data_format=A_ , **A_ )
def __magic_name__ ( self : Dict , A_ : ImageInput , A_ : bool = None , A_ : Dict[str, int] = None , A_ : PILImageResampling = None , A_ : bool = None , A_ : Optional[str] = None , A_ : Optional[str] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Dict , ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = size if size is not None else self.size
_lowerCAmelCase : List[Any] = get_size_dict(A_ )
_lowerCAmelCase : Tuple = resample if resample is not None else self.resample
_lowerCAmelCase : Union[str, Any] = apply_ocr if apply_ocr is not None else self.apply_ocr
_lowerCAmelCase : Tuple = ocr_lang if ocr_lang is not None else self.ocr_lang
_lowerCAmelCase : str = tesseract_config if tesseract_config is not None else self.tesseract_config
_lowerCAmelCase : Union[str, Any] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase : Dict = [to_numpy_array(A_ ) for image in images]
if apply_ocr:
requires_backends(self , "pytesseract" )
_lowerCAmelCase : Any = []
_lowerCAmelCase : List[Any] = []
for image in images:
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = apply_tesseract(A_ , A_ , A_ )
words_batch.append(A_ )
boxes_batch.append(A_ )
if do_resize:
_lowerCAmelCase : Union[str, Any] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
# flip color channels from RGB to BGR (as Detectron2 requires this)
_lowerCAmelCase : Optional[int] = [flip_channel_order(A_ ) for image in images]
_lowerCAmelCase : List[Any] = [to_channel_dimension_format(A_ , A_ ) for image in images]
_lowerCAmelCase : Tuple = BatchFeature(data={"pixel_values": images} , tensor_type=A_ )
if apply_ocr:
_lowerCAmelCase : Tuple = words_batch
_lowerCAmelCase : Any = boxes_batch
return data
| 503
| 0
|
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCAmelCase : Tuple = logging.get_logger(__name__)
_lowerCAmelCase : Optional[int] = {
"huggingface/time-series-transformer-tourism-monthly": (
"https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json"
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class UpperCAmelCase_ ( _UpperCamelCase ):
__SCREAMING_SNAKE_CASE : str = 'time_series_transformer'
__SCREAMING_SNAKE_CASE : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
'num_hidden_layers': 'encoder_layers',
}
def __init__( self : List[str] , A : Optional[int] = None , A : Optional[int] = None , A : str = "student_t" , A : str = "nll" , A : int = 1 , A : List[int] = [1, 2, 3, 4, 5, 6, 7] , A : Optional[Union[str, bool]] = "mean" , A : int = 0 , A : int = 0 , A : int = 0 , A : int = 0 , A : Optional[List[int]] = None , A : Optional[List[int]] = None , A : int = 3_2 , A : int = 3_2 , A : int = 2 , A : int = 2 , A : int = 2 , A : int = 2 , A : bool = True , A : str = "gelu" , A : int = 6_4 , A : float = 0.1 , A : float = 0.1 , A : float = 0.1 , A : float = 0.1 , A : float = 0.1 , A : int = 1_0_0 , A : float = 0.02 , A : List[str]=True , **A : Dict , ):
# time series specific configuration
_UpperCAmelCase : int = prediction_length
_UpperCAmelCase : str = context_length or prediction_length
_UpperCAmelCase : Tuple = distribution_output
_UpperCAmelCase : Union[str, Any] = loss
_UpperCAmelCase : List[str] = input_size
_UpperCAmelCase : Union[str, Any] = num_time_features
_UpperCAmelCase : Tuple = lags_sequence
_UpperCAmelCase : int = scaling
_UpperCAmelCase : List[Any] = num_dynamic_real_features
_UpperCAmelCase : str = num_static_real_features
_UpperCAmelCase : Union[str, Any] = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(A ) != num_static_categorical_features:
raise ValueError(
"The cardinality should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : List[str] = cardinality
else:
_UpperCAmelCase : Any = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(A ) != num_static_categorical_features:
raise ValueError(
"The embedding dimension should be a list of the same length as `num_static_categorical_features`" )
_UpperCAmelCase : Tuple = embedding_dimension
else:
_UpperCAmelCase : List[str] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
_UpperCAmelCase : Dict = num_parallel_samples
# Transformer architecture configuration
_UpperCAmelCase : Tuple = input_size * len(A ) + self._number_of_features
_UpperCAmelCase : Optional[int] = d_model
_UpperCAmelCase : List[str] = encoder_attention_heads
_UpperCAmelCase : Dict = decoder_attention_heads
_UpperCAmelCase : Optional[int] = encoder_ffn_dim
_UpperCAmelCase : str = decoder_ffn_dim
_UpperCAmelCase : str = encoder_layers
_UpperCAmelCase : Optional[int] = decoder_layers
_UpperCAmelCase : Optional[int] = dropout
_UpperCAmelCase : str = attention_dropout
_UpperCAmelCase : Optional[Any] = activation_dropout
_UpperCAmelCase : int = encoder_layerdrop
_UpperCAmelCase : Dict = decoder_layerdrop
_UpperCAmelCase : Dict = activation_function
_UpperCAmelCase : Any = init_std
_UpperCAmelCase : int = use_cache
super().__init__(is_encoder_decoder=A , **A )
@property
def snake_case_ ( self : List[Any] ):
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 289
|
"""simple docstring"""
import requests
_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 289
| 1
|
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
logger = logging.getLogger(__name__)


@dataclass
class Seq2SeqTrainingArguments(TrainingArguments):
    label_smoothing: Optional[float] = field(
        default=0.0, metadata={"help": "The label smoothing epsilon to apply (if not zero)."}
    )
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to SortishSamler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    adafactor: bool = field(default=False, metadata={"help": "whether to use adafactor"})
    encoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Encoder layer dropout probability. Goes into model.config."}
    )
    decoder_layerdrop: Optional[float] = field(
        default=None, metadata={"help": "Decoder layer dropout probability. Goes into model.config."}
    )
    dropout: Optional[float] = field(default=None, metadata={"help": "Dropout probability. Goes into model.config."})
    attention_dropout: Optional[float] = field(
        default=None, metadata={"help": "Attention dropout probability. Goes into model.config."}
    )
    lr_scheduler: Optional[str] = field(
        default="linear",
        metadata={"help": f"Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys())}"},
    )
| 89
|
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
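# Worked example (sketch, illustrative numbers only): solving for the force
# between two plates of area 4 m^2 held 0.03 m apart gives roughly
# casimir_force(force=0, area=4, distance=0.03) -> {"force": ~6.4248e-21} (N).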
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 89
| 1
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_A = logging.get_logger(__name__)
_A = {
"""microsoft/focalnet-tiny""": """https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json""",
}
class _lowerCamelCase ( a_ , a_ ):
_lowerCamelCase :Optional[int] = "focalnet"
def __init__( self : str , UpperCamelCase : Optional[Any]=2_24 , UpperCamelCase : int=4 , UpperCamelCase : str=3 , UpperCamelCase : Any=96 , UpperCamelCase : Dict=False , UpperCamelCase : Union[str, Any]=[1_92, 3_84, 7_68, 7_68] , UpperCamelCase : Optional[int]=[2, 2, 6, 2] , UpperCamelCase : Dict=[2, 2, 2, 2] , UpperCamelCase : Optional[int]=[3, 3, 3, 3] , UpperCamelCase : Optional[int]="gelu" , UpperCamelCase : List[str]=4.0 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Tuple=False , UpperCamelCase : List[str]=1E-4 , UpperCamelCase : List[Any]=False , UpperCamelCase : Tuple=False , UpperCamelCase : Optional[int]=False , UpperCamelCase : int=0.02 , UpperCamelCase : List[str]=1E-5 , UpperCamelCase : List[Any]=32 , UpperCamelCase : List[str]=None , UpperCamelCase : List[str]=None , **UpperCamelCase : Any , ) -> Optional[Any]:
"""simple docstring"""
super().__init__(**UpperCamelCase )
lowerCAmelCase__ : Optional[int] = image_size
lowerCAmelCase__ : Tuple = patch_size
lowerCAmelCase__ : Any = num_channels
lowerCAmelCase__ : int = embed_dim
lowerCAmelCase__ : Union[str, Any] = use_conv_embed
lowerCAmelCase__ : int = hidden_sizes
lowerCAmelCase__ : Optional[Any] = depths
lowerCAmelCase__ : List[Any] = focal_levels
lowerCAmelCase__ : int = focal_windows
lowerCAmelCase__ : int = hidden_act
lowerCAmelCase__ : List[Any] = mlp_ratio
lowerCAmelCase__ : List[str] = hidden_dropout_prob
lowerCAmelCase__ : Tuple = drop_path_rate
lowerCAmelCase__ : int = use_layerscale
lowerCAmelCase__ : List[str] = layerscale_value
lowerCAmelCase__ : List[Any] = use_post_layernorm
lowerCAmelCase__ : Dict = use_post_layernorm_in_modulation
lowerCAmelCase__ : Tuple = normalize_modulator
lowerCAmelCase__ : str = initializer_range
lowerCAmelCase__ : List[str] = layer_norm_eps
lowerCAmelCase__ : int = encoder_stride
lowerCAmelCase__ : Optional[int] = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(self.depths ) + 1 )]
lowerCAmelCase__ , lowerCAmelCase__ : int = get_aligned_output_features_output_indices(
out_features=UpperCamelCase , out_indices=UpperCamelCase , stage_names=self.stage_names )
| 299
|
"""simple docstring"""
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class _lowerCamelCase ( a_ ):
def __init__( self : Optional[Any] , UpperCamelCase : pyspark.sql.DataFrame , UpperCamelCase : Optional[NamedSplit] = None , UpperCamelCase : Optional[Features] = None , UpperCamelCase : bool = True , UpperCamelCase : str = None , UpperCamelCase : bool = False , UpperCamelCase : str = None , UpperCamelCase : bool = True , UpperCamelCase : str = "arrow" , **UpperCamelCase : Optional[int] , ) -> List[str]:
"""simple docstring"""
super().__init__(
split=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , streaming=UpperCamelCase , **UpperCamelCase , )
lowerCAmelCase__ : Union[str, Any] = load_from_cache_file
lowerCAmelCase__ : List[str] = file_format
lowerCAmelCase__ : Any = Spark(
df=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , working_dir=UpperCamelCase , **UpperCamelCase , )
def _lowerCAmelCase ( self : int ) -> int:
"""simple docstring"""
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
lowerCAmelCase__ : List[str] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=UpperCamelCase , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
| 299
| 1
|
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: float
    left: TreeNode | None = None
    right: TreeNode | None = None


def is_binary_search_tree(tree: TreeNode | None) -> bool:
    def is_valid_tree(node: TreeNode | None) -> bool:
        if node is None:
            return True
        if not isinstance(node, TreeNode):
            return False
        try:
            float(node.data)
        except (TypeError, ValueError):
            return False
        return is_valid_tree(node.left) and is_valid_tree(node.right)

    if not is_valid_tree(tree):
        raise ValueError(
            "Each node should be type of TreeNode and data should be float."
        )

    def is_binary_search_tree_recursive_check(
        node: TreeNode | None, left_bound: float, right_bound: float
    ) -> bool:
        if node is None:
            return True
        return (
            left_bound < node.data < right_bound
            and is_binary_search_tree_recursive_check(node.left, left_bound, node.data)
            and is_binary_search_tree_recursive_check(
                node.right, node.data, right_bound
            )
        )

    return is_binary_search_tree_recursive_check(tree, -float("inf"), float("inf"))
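# Worked example (sketch, added for illustration): the left child must be
# smaller and the right child larger than the root for the check to pass.
assert is_binary_search_tree(TreeNode(2.0, TreeNode(1.0), TreeNode(3.0)))
assert not is_binary_search_tree(TreeNode(2.0, TreeNode(5.0), TreeNode(3.0)))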
if __name__ == "__main__":
import doctest
doctest.testmod()
| 714
|
from queue import PriorityQueue
from typing import Any
import numpy as np
def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float,
) -> float:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance


def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward = PriorityQueue()
    queue_backward = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
    "B": [["C", 1]],
    "C": [["D", 1]],
    "D": [["F", 1]],
    "E": [["B", 1], ["G", 2]],
    "F": [],
    "G": [["F", 1]],
}
graph_bwd = {
    "B": [["E", 1]],
    "C": [["B", 1]],
    "D": [["C", 1]],
    "F": [["D", 1], ["G", 1]],
    "E": [[None, np.inf]],
    "G": [["E", 2]],
}
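# Worked example (sketch, added for illustration): with the demo graphs above,
# the cheapest E -> F route is E -> G -> F at cost 2 + 1 = 3; unreachable
# pairs fall through and return the initial -1.
assert bidirectional_dij("E", "F", graph_fwd, graph_bwd) == 3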
if __name__ == "__main__":
import doctest
doctest.testmod()
| 314
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 290
|
'''simple docstring'''
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext(path: str):
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext):
            return ext
    raise Exception(
        f"Unable to determine file format from file extension {path}. "
        f"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}"
    )


def run_command_factory(args):
    nlp = pipeline(
        task=args.task,
        model=args.model if args.model else None,
        config=args.config,
        tokenizer=args.tokenizer,
        device=args.device,
    )
    format = try_infer_format_from_ext(args.input) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format,
        output_path=args.output,
        input_path=args.input,
        column=args.column if args.column else nlp.default_input_names,
        overwrite=args.overwrite,
    )
    return RunCommand(nlp, reader)
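# Example invocation (sketch; the flags are the ones registered in
# RunCommand.register_subcommand below, the file names are hypothetical):
#   transformers-cli run --task text-classification --input data.csv \
#       --column text --format csv --output predictions.json --device -1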
class RunCommand(BaseTransformersCLICommand):
    def __init__(self, nlp: Pipeline, reader: PipelineDataFormat):
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        run_parser = parser.add_parser("run", help="Run a pipeline through the CLI")
        run_parser.add_argument("--task", choices=get_supported_tasks(), help="Task to run")
        run_parser.add_argument("--input", type=str, help="Path to the file to use for inference")
        run_parser.add_argument("--output", type=str, help="Path to the file that will be used post to write results.")
        run_parser.add_argument("--model", type=str, help="Name or path to the model to instantiate.")
        run_parser.add_argument("--config", type=str, help="Name or path to the model's config to instantiate.")
        run_parser.add_argument(
            "--tokenizer", type=str, help="Name of the tokenizer to use. (default: same as the model name)"
        )
        run_parser.add_argument(
            "--column",
            type=str,
            help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)",
        )
        run_parser.add_argument(
            "--format",
            type=str,
            default="infer",
            choices=PipelineDataFormat.SUPPORTED_FORMATS,
            help="Input format to read from",
        )
        run_parser.add_argument(
            "--device",
            type=int,
            default=-1,
            help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)",
        )
        run_parser.add_argument("--overwrite", action="store_true", help="Allow overwriting the output file.")
        run_parser.set_defaults(func=run_command_factory)

    def run(self):
        nlp, outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry) if self._reader.is_multi_columns else nlp(entry)
            if isinstance(output, dict):
                outputs.append(output)
            else:
                outputs += output
        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs)
            logger.warning(f"Current pipeline requires output to be in binary format, saving at {binary_path}")
        else:
            self._reader.save(outputs)
| 301
| 0
|
class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]
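# Worked example (sketch, added for illustration): both strategies agree on
# the classic pair "kitten" -> "sitting", which needs three edits.
assert EditDistance().min_dist_top_down("kitten", "sitting") == 3
assert EditDistance().min_dist_bottom_up("kitten", "sitting") == 3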
if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
| 707
|
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_a: Optional[Any] = logging.get_logger(__name__)
def __lowerCAmelCase ( A ):
if isinstance(A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(A ):
return [[videos]]
raise ValueError(F"Could not make batched video from {videos}" )
class __UpperCamelCase ( lowercase ):
SCREAMING_SNAKE_CASE__ = ['pixel_values']
def __init__( self : int , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : bool = True , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = True , lowerCAmelCase : Union[int, float] = 1 / 255 , lowerCAmelCase : bool = True , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , **lowerCAmelCase : List[Any] , ):
'''simple docstring'''
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 256}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = offset
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(F"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}" )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def __A ( self : List[str] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, int] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : Dict , ):
'''simple docstring'''
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F"Size must have 'height' and 'width' as keys. Got {size.keys()}" )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def __A ( self : List[Any] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[int, float] , lowerCAmelCase : bool = True , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : List[str] , ):
'''simple docstring'''
UpperCAmelCase_ = image.astype(np.floataa )
if offset:
UpperCAmelCase_ = image - (scale / 2)
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def __A ( self : Tuple , lowerCAmelCase : np.ndarray , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Union[float, List[float]] , lowerCAmelCase : Optional[Union[str, ChannelDimension]] = None , **lowerCAmelCase : int , ):
'''simple docstring'''
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def __A ( self : Union[str, Any] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
if offset and not do_rescale:
raise ValueError("For offset, do_rescale must also be set to True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase , offset=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def __A ( self : Optional[int] , lowerCAmelCase : ImageInput , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : PILImageResampling = None , lowerCAmelCase : bool = None , lowerCAmelCase : Dict[str, int] = None , lowerCAmelCase : bool = None , lowerCAmelCase : float = None , lowerCAmelCase : bool = None , lowerCAmelCase : bool = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[float, List[float]]] = None , lowerCAmelCase : Optional[Union[str, TensorType]] = None , lowerCAmelCase : ChannelDimension = ChannelDimension.FIRST , **lowerCAmelCase : Dict , ):
'''simple docstring'''
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = offset if offset is not None else self.offset
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , offset=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
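# Hedged usage sketch: the processor above matches a Vivit/VideoMAE-style video
# image processor from transformers (resize -> center crop -> rescale with
# optional offset -> normalize, applied frame by frame). Assuming the upstream
# VivitImageProcessor API, where calling the instance dispatches to `preprocess`:
#
#   import numpy as np
#   from transformers import VivitImageProcessor
#
#   processor = VivitImageProcessor()  # defaults: shortest_edge=256 resize, 224x224 crop
#   video = [np.random.randint(0, 256, (360, 480, 3), dtype=np.uint8) for _ in range(8)]
#   batch = processor(video, return_tensors="np")
#   print(batch["pixel_values"].shape)  # (1, 8, 3, 224, 224): batch, frames, C, H, W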
| 268
| 0
|
'''simple docstring'''
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class __SCREAMING_SNAKE_CASE :
def __init__( self : Union[str, Any] ):
'''simple docstring'''
lowercase : Tuple =''''''
lowercase : Tuple =''''''
lowercase : Tuple =[]
lowercase : int =0
lowercase : Union[str, Any] =256
lowercase : Optional[int] =0
lowercase : int =0
lowercase : List[str] =0
lowercase : Dict =0
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : List[Any] ):
'''simple docstring'''
lowercase : Optional[Any] =cva.imread(UpperCAmelCase__ , 0 )
lowercase : List[Any] =copy.deepcopy(self.img )
lowercase , lowercase , lowercase : Union[str, Any] =plt.hist(self.img.ravel() , 256 , [0, 256] , label='''x''' )
lowercase : Optional[Any] =np.sum(x )
for i in range(len(x ) ):
lowercase : List[str] =x[i] / self.k
self.sk += prk
lowercase : List[Any] =(self.L - 1) * self.sk
if self.rem != 0:
lowercase : Optional[Any] =last % 1  # fractional part of last, used for rounding below
lowercase : str =int(last + 1 if self.rem >= 0.5 else last )
self.last_list.append(last )
lowercase : Optional[int] =int(np.ma.count(self.img ) / self.img[1].size )
lowercase : Optional[int] =self.img[1].size
for i in range(self.number_of_cols ):
for j in range(self.number_of_rows ):
lowercase : Optional[Any] =self.img[j][i]
if num != self.last_list[num]:
lowercase : List[str] =self.last_list[num]
cva.imwrite('''output_data/output.jpg''' , self.img )
def lowerCamelCase_ ( self : Dict ):
'''simple docstring'''
plt.hist(self.img.ravel() , 256 , [0, 256] )
def lowerCamelCase_ ( self : str ):
'''simple docstring'''
cva.imshow('''Output-Image''' , self.img )
cva.imshow('''Input-Image''' , self.original_image )
cva.waitKey(5000 )
cva.destroyAllWindows()
if __name__ == "__main__":
UpperCamelCase_ = os.path.join(os.path.dirname(__file__), """image_data/input.jpg""")
UpperCamelCase_ = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 92
|
'''simple docstring'''
def _snake_case ( A_ : Optional[int] ):
"""simple docstring"""
a_ : str = len(A_ )
for i in range(length - 1 ):
a_ : List[Any] = i
for k in range(i + 1 , A_ ):
if collection[k] < collection[least]:
a_ : Union[str, Any] = k
if least != i:
collection[least], collection[i] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__snake_case: Union[str, Any] = input("Enter numbers separated by a comma:\n").strip()
__snake_case: Any = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
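# Worked example for the sort above (the demo already calls it selection_sort):
# each pass swaps the smallest remaining element into slot i, so the sorted
# prefix grows by one slot per pass.
# >>> selection_sort([64, 25, 12, 22, 11])
# [11, 12, 22, 25, 64]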
| 577
| 0
|
'''simple docstring'''
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: str =AutoConfig.from_pretrained(__a )
lowerCamelCase__: Any =FlaxAutoModelForSeqaSeqLM.from_config(config=__a )
lowerCamelCase__: Optional[Any] =checkpoints.load_tax_checkpoint(__a )
lowerCamelCase__: str ="wi_0" in tax_model["target"]["encoder"]["layers_0"]["mlp"]
if config.model_type == "t5":
lowerCamelCase__: Optional[int] ="SelfAttention"
if config.model_type == "longt5" and config.encoder_attention_type == "local":
lowerCamelCase__: List[Any] ="LocalSelfAttention"
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__: str ="TransientGlobalSelfAttention"
else:
raise ValueError(
"Given config is expected to have `model_type=\'t5\'`, or `model_type=\'longt5` with `encoder_attention_type`"
" attribute with a value from [\'local\', \'transient-global]." )
# Encoder
for layer_index in range(config.num_layers ):
lowerCamelCase__: Union[str, Any] =F"""layers_{str(layer_index )}"""
# Self-Attention
lowerCamelCase__: Union[str, Any] =tax_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
lowerCamelCase__: Union[str, Any] =tax_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
lowerCamelCase__: List[str] =tax_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
lowerCamelCase__: Tuple =tax_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__: Any =tax_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]
# Layer Normalization
lowerCamelCase__: int =tax_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]
if split_mlp_wi:
lowerCamelCase__: List[str] =tax_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowerCamelCase__: Dict =tax_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowerCamelCase__: Optional[Any] =tax_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]
lowerCamelCase__: Union[str, Any] =tax_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowerCamelCase__: Union[str, Any] =tax_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowerCamelCase__: int =flax_model.params["encoder"]["block"][str(layer_index )]["layer"]
lowerCamelCase__: Union[str, Any] =tax_attention_key
lowerCamelCase__: str =tax_attention_out
lowerCamelCase__: Dict =tax_attention_query
lowerCamelCase__: Union[str, Any] =tax_attention_value
lowerCamelCase__: List[Any] =tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__: Dict =tax_global_layer_norm
if split_mlp_wi:
lowerCamelCase__: Any =tax_mlp_wi_0
lowerCamelCase__: str =tax_mlp_wi_1
else:
lowerCamelCase__: Dict =tax_mlp_wi
lowerCamelCase__: List[Any] =tax_mlp_wo
lowerCamelCase__: List[str] =tax_mlp_layer_norm
lowerCamelCase__: Tuple =flax_model_encoder_layer_block
# Only for layer 0:
lowerCamelCase__: Optional[int] =tax_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
lowerCamelCase__: Union[str, Any] =tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
lowerCamelCase__: Any =tax_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
lowerCamelCase__: List[Any] =tax_encoder_global_rel_embedding
# Assigning
lowerCamelCase__: Union[str, Any] =tax_model["target"]["encoder"]["encoder_norm"]["scale"]
lowerCamelCase__: Optional[Any] =tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
lowerCamelCase__: str =F"""layers_{str(layer_index )}"""
# Self-Attention
lowerCamelCase__: int =tax_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
lowerCamelCase__: Union[str, Any] =tax_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
lowerCamelCase__: int =tax_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
lowerCamelCase__: int =tax_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]
# Layer Normalization
lowerCamelCase__: Optional[Any] =tax_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
"scale"
]
# Encoder-Decoder-Attention
lowerCamelCase__: Union[str, Any] =tax_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
lowerCamelCase__: Any =tax_enc_dec_attention_module["key"]["kernel"]
lowerCamelCase__: Any =tax_enc_dec_attention_module["out"]["kernel"]
lowerCamelCase__: str =tax_enc_dec_attention_module["query"]["kernel"]
lowerCamelCase__: Optional[Any] =tax_enc_dec_attention_module["value"]["kernel"]
# Layer Normalization
lowerCamelCase__: Optional[Any] =tax_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]
# MLP
if split_mlp_wi:
lowerCamelCase__: List[str] =tax_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
lowerCamelCase__: Dict =tax_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
else:
lowerCamelCase__: str =tax_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]
lowerCamelCase__: List[str] =tax_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]
# Layer Normalization
lowerCamelCase__: int =tax_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]
# Assigning
lowerCamelCase__: Optional[int] =flax_model.params["decoder"]["block"][str(layer_index )]["layer"]
lowerCamelCase__: List[Any] =tax_attention_key
lowerCamelCase__: Tuple =tax_attention_out
lowerCamelCase__: Dict =tax_attention_query
lowerCamelCase__: Tuple =tax_attention_value
lowerCamelCase__: Tuple =tax_pre_attention_layer_norm
lowerCamelCase__: Union[str, Any] =tax_enc_dec_attention_key
lowerCamelCase__: Union[str, Any] =tax_enc_dec_attention_out
lowerCamelCase__: List[str] =tax_enc_dec_attention_query
lowerCamelCase__: Optional[Any] =tax_enc_dec_attention_value
lowerCamelCase__: Tuple =tax_cross_layer_norm
if split_mlp_wi:
lowerCamelCase__: Union[str, Any] =tax_mlp_wi_0
lowerCamelCase__: int =tax_mlp_wi_1
else:
lowerCamelCase__: int =tax_mlp_wi
lowerCamelCase__: Optional[Any] =tax_mlp_wo
lowerCamelCase__: List[Any] =tax_mlp_layer_norm
lowerCamelCase__: Union[str, Any] =flax_model_decoder_layer_block
# Decoder Normalization
lowerCamelCase__: Any =tax_model["target"]["decoder"]["decoder_norm"]["scale"]
lowerCamelCase__: Dict =tax_decoder_norm
# Only for layer 0:
lowerCamelCase__: Optional[Any] =tax_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
lowerCamelCase__: List[str] =tax_decoder_rel_embedding
# Token Embeddings
lowerCamelCase__: Union[str, Any] =tax_model["target"]["token_embedder"]["embedding"]
lowerCamelCase__: Dict =tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
lowerCamelCase__: List[Any] =tax_model["target"]["decoder"]["logits_dense"]["kernel"]
flax_model.save_pretrained(__a )
print("T5X Model was sucessfully converted!" )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path the T5X checkpoint."
)
parser.add_argument("--config_name", default=None, type=str, required=True, help="Config name of LongT5/T5 model.")
parser.add_argument(
"--flax_dump_folder_path", default=None, type=str, required=True, help="Path to the output FLAX model."
)
__A = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
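# Hedged CLI sketch for the converter above, using exactly the three flags its
# argparse parser defines (the script filename and paths are placeholders):
#
#   python convert_longt5x_checkpoint_to_flax.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_dir \
#       --config_name google/long-t5-local-base \
#       --flax_dump_folder_path ./flax_model
#
# The script loads the T5X params, copies encoder/decoder blocks layer by layer
# as shown, and writes a Flax checkpoint via save_pretrained.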
| 703
|
def lowerCAmelCase_ ( __a , __a ) -> float:
"""simple docstring"""
if density <= 0:
raise ValueError("Impossible fluid density" )
if bulk_modulus <= 0:
raise ValueError("Impossible bulk modulus" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
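# Worked example: for water at ~20 C (density ~998 kg/m^3, bulk modulus
# ~2.15e9 Pa), v = sqrt(K / rho) gives about 1467.8 m/s, close to the measured
# ~1480 m/s. Assuming the intended signature speed_of_sound_in_a_fluid(density, bulk_modulus):
# >>> round(speed_of_sound_in_a_fluid(998.0, 2.15e9), 1)
# 1467.8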
| 437
| 0
|
from pathlib import Path
import fire
from tqdm import tqdm
def lowercase__ ( _UpperCamelCase="ro" , _UpperCamelCase="en" , _UpperCamelCase="wmt16" , _UpperCamelCase=None) -> None:
"""simple docstring"""
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('run pip install datasets')
UpperCamelCase = F'{src_lang}-{tgt_lang}'
print(F'Converting {dataset}-{pair}')
UpperCamelCase = datasets.load_dataset(_UpperCamelCase , _UpperCamelCase)
if save_dir is None:
UpperCamelCase = F'{dataset}-{pair}'
UpperCamelCase = Path(_UpperCamelCase)
save_dir.mkdir(exist_ok=_UpperCamelCase)
for split in ds.keys():
print(F'Splitting {split} with {ds[split].num_rows} records')
# to save to val.source, val.target like summary datasets
UpperCamelCase = 'val' if split == 'validation' else split
UpperCamelCase = save_dir.joinpath(F'{fn}.source')
UpperCamelCase = save_dir.joinpath(F'{fn}.target')
UpperCamelCase = src_path.open('w+')
UpperCamelCase = tgt_path.open('w+')
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split]):
UpperCamelCase = x['translation']
src_fp.write(ex[src_lang] + '\n')
tgt_fp.write(ex[tgt_lang] + '\n')
print(F'Saved {dataset} dataset to {save_dir}')
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
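# Because the entry point is handed to fire.Fire, each parameter becomes a CLI
# flag. A hedged invocation sketch (the script filename is a placeholder):
#
#   python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
#
# This writes one sentence per line to train/val/test .source and .target files
# under save_dir ("validation" is renamed to "val", as in summarization datasets).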
| 280
|
def lowercase__ ( num_a , num_b) -> bool:
"""simple docstring"""
return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
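# Why this works: two ints have opposite signs exactly when the sign bit of
# their XOR is set, and Python's `^` binds tighter than `<`, so the body reads
# as `(num_a ^ num_b) < 0`. With the intended name different_signs:
# >>> different_signs(3, -5)
# True
# >>> different_signs(-3, -5)
# False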
| 280
| 1
|
'''simple docstring'''
from __future__ import annotations
UpperCAmelCase__ :int = """Muhammad Umer Farooq"""
UpperCAmelCase__ :Dict = """MIT"""
UpperCAmelCase__ :Dict = """1.0.0"""
UpperCAmelCase__ :str = """Muhammad Umer Farooq"""
UpperCAmelCase__ :str = """contact@muhammadumerfarooq.me"""
UpperCAmelCase__ :Any = """Alpha"""
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class SCREAMING_SNAKE_CASE ( lowerCAmelCase_ ):
def __init__( self : Tuple , A__ : str ):
"""simple docstring"""
super().__init__()
__lowerCamelCase : list[str] = []
__lowerCamelCase : Optional[int] = domain
def a_ ( self : Optional[int] , A__ : str , A__ : list[tuple[str, str | None]] ):
"""simple docstring"""
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
__lowerCamelCase : Dict = parse.urljoin(self.domain , A__ )
self.urls.append(A__ )
def __lowercase (_lowercase ) -> str:
"""simple docstring"""
return ".".join(get_sub_domain_name(_lowercase ).split(""".""" )[-2:] )
def __lowercase (_lowercase ) -> str:
"""simple docstring"""
return parse.urlparse(_lowercase ).netloc
def __lowercase (_lowercase = "https://github.com" ) -> list[str]:
"""simple docstring"""
__lowerCamelCase : List[str] = get_domain_name(_lowercase )
# Initialize the parser
__lowerCamelCase : Any = Parser(_lowercase )
try:
# Open URL
__lowerCamelCase : Any = requests.get(_lowercase )
# pass the raw HTML to the parser to get links
parser.feed(r.text )
# Get links and loop through
__lowerCamelCase : Any = set()
for link in parser.urls:
# open URL.
# read = requests.get(link)
try:
__lowerCamelCase : Tuple = requests.get(_lowercase )
# Get the valid email.
__lowerCamelCase : Optional[Any] = re.findall("""[a-zA-Z0-9]+@""" + domain, read.text )
# If not in list then append it.
for email in emails:
valid_emails.add(_lowercase )
except ValueError:
pass
except ValueError:
raise SystemExit(1 )
# Finally return a sorted list of email addresses with no duplicates.
return sorted(_lowercase )
if __name__ == "__main__":
UpperCAmelCase__ :int = emails_from_url("""https://github.com""")
print(f'''{len(emails)} emails found:''')
print("""\n""".join(sorted(emails)))
| 711
|
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCAmelCase__ :List[str] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCAmelCase__ :Optional[int] = typing.Union[np.floataa, int, float] # noqa: UP007
def __lowercase (_lowercase, _lowercase ) -> VectorOut:
"""simple docstring"""
return np.sqrt(np.sum((np.asarray(_lowercase ) - np.asarray(_lowercase )) ** 2 ) )
def __lowercase (_lowercase, _lowercase ) -> VectorOut:
"""simple docstring"""
return sum((va - va) ** 2 for va, va in zip(_lowercase, _lowercase ) ) ** (1 / 2)
if __name__ == "__main__":
def __lowercase () -> None:
"""simple docstring"""
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""", number=10_000, globals=globals(), ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""", number=10_000, globals=globals(), ) )
benchmark()
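# Sanity check: both variants compute the L2 norm of the difference, so they
# agree; for the benchmark vectors the answer is sqrt(27) = 3*sqrt(3).
# >>> euclidean_distance([1, 2, 3], [4, 5, 6])
# 5.196152422706632
# >>> euclidean_distance_no_np([1, 2, 3], [4, 5, 6])
# 5.196152422706632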
| 483
| 0
|
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _lowerCamelCase:
lowercase_ : Tuple = MBartConfig
lowercase_ : Optional[Any] = {}
lowercase_ : List[Any] = """gelu"""
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=99, lowerCamelCase=32, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase=37, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=20, lowerCamelCase=2, lowerCamelCase=1, lowerCamelCase=0, ) -> Optional[int]:
"""simple docstring"""
_lowercase : str = parent
_lowercase : str = batch_size
_lowercase : Union[str, Any] = seq_length
_lowercase : Dict = is_training
_lowercase : str = use_labels
_lowercase : List[Any] = vocab_size
_lowercase : Dict = hidden_size
_lowercase : Tuple = num_hidden_layers
_lowercase : int = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : Tuple = hidden_dropout_prob
_lowercase : int = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : Optional[Any] = eos_token_id
_lowercase : Optional[int] = pad_token_id
_lowercase : Any = bos_token_id
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
_lowercase : List[str] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
_lowercase : Optional[int] = tf.concat([input_ids, eos_tensor], axis=1)
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : Dict = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
_lowercase : str = prepare_mbart_inputs_dict(lowerCamelCase, lowerCamelCase, lowerCamelCase)
return config, inputs_dict
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase) -> Any:
"""simple docstring"""
_lowercase : str = TFMBartModel(config=lowerCamelCase).get_decoder()
_lowercase : Union[str, Any] = inputs_dict['input_ids']
_lowercase : str = input_ids[:1, :]
_lowercase : Optional[int] = inputs_dict['attention_mask'][:1, :]
_lowercase : Tuple = inputs_dict['head_mask']
_lowercase : Tuple = 1
# first forward pass
_lowercase : Union[str, Any] = model(lowerCamelCase, attention_mask=lowerCamelCase, head_mask=lowerCamelCase, use_cache=lowerCamelCase)
_lowercase , _lowercase : Union[str, Any] = outputs.to_tuple()
_lowercase : Optional[int] = past_key_values[1]
def UpperCamelCase_( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , ) -> Optional[Any]:
if attention_mask is None:
_lowercase : Dict = tf.cast(tf.math.not_equal(lowerCamelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
_lowercase : List[Any] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
_lowercase : List[Any] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
_lowercase : int = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
_lowercase : Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class _lowerCamelCase( _a, _a, unittest.TestCase ):
lowercase_ : Optional[int] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
lowercase_ : Optional[Any] = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
lowercase_ : Optional[int] = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowercase_ : Optional[Any] = True
lowercase_ : Union[str, Any] = False
lowercase_ : Dict = False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> List[Any]:
"""simple docstring"""
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : List[str] = TFMBartModelTester(self)
_lowercase : List[Any] = ConfigTester(self, config_class=lowerCamelCase)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCamelCase)
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCamelCase( unittest.TestCase ):
lowercase_ : Tuple = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
lowercase_ : int = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
lowercase_ : Optional[Any] = """facebook/mbart-large-en-ro"""
@cached_property
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name)
@cached_property
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Union[str, Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name)
return model
def UpperCamelCase ( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : str = self.translate_src_text(**lowerCamelCase)
self.assertListEqual(self.expected_text, lowerCamelCase)
def UpperCamelCase ( self, **lowerCamelCase) -> List[str]:
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer(self.src_text, **lowerCamelCase, return_tensors='tf')
_lowercase : int = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2)
_lowercase : List[Any] = self.tokenizer.batch_decode(lowerCamelCase, skip_special_tokens=lowerCamelCase)
return generated_words
@slow
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 89
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase:
def __init__( self, lowerCamelCase, lowerCamelCase=13, lowerCamelCase=7, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=True, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=False, lowerCamelCase=2, lowerCamelCase=99, lowerCamelCase=0, lowerCamelCase=32, lowerCamelCase=5, lowerCamelCase=4, lowerCamelCase=0.1, lowerCamelCase=0.1, lowerCamelCase=5_12, lowerCamelCase=2, lowerCamelCase=0.0_2, lowerCamelCase=2, lowerCamelCase=4, lowerCamelCase="last", lowerCamelCase=True, lowerCamelCase=None, lowerCamelCase=0, ) -> str:
"""simple docstring"""
_lowercase : Union[str, Any] = parent
_lowercase : Optional[Any] = batch_size
_lowercase : List[str] = seq_length
_lowercase : int = is_training
_lowercase : List[str] = use_input_lengths
_lowercase : int = use_token_type_ids
_lowercase : Any = use_labels
_lowercase : Union[str, Any] = gelu_activation
_lowercase : List[str] = sinusoidal_embeddings
_lowercase : str = causal
_lowercase : Optional[int] = asm
_lowercase : Union[str, Any] = n_langs
_lowercase : List[Any] = vocab_size
_lowercase : Any = n_special
_lowercase : Any = hidden_size
_lowercase : str = num_hidden_layers
_lowercase : Union[str, Any] = num_attention_heads
_lowercase : Tuple = hidden_dropout_prob
_lowercase : Optional[int] = attention_probs_dropout_prob
_lowercase : Union[str, Any] = max_position_embeddings
_lowercase : List[str] = type_sequence_label_size
_lowercase : Any = initializer_range
_lowercase : int = num_labels
_lowercase : Optional[int] = num_choices
_lowercase : Optional[Any] = summary_type
_lowercase : Optional[Any] = use_proj
_lowercase : int = scope
_lowercase : List[Any] = bos_token_id
def UpperCamelCase ( self) -> Optional[int]:
"""simple docstring"""
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
_lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length])
_lowercase : int = None
if self.use_input_lengths:
_lowercase : Dict = (
ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
_lowercase : Tuple = None
if self.use_token_type_ids:
_lowercase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
_lowercase : Tuple = None
_lowercase : int = None
_lowercase : int = None
if self.use_labels:
_lowercase : str = ids_tensor([self.batch_size], self.type_sequence_label_size)
_lowercase : str = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
_lowercase : Dict = ids_tensor([self.batch_size], 2).float()
_lowercase : Tuple = ids_tensor([self.batch_size], self.num_choices)
_lowercase : Dict = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, num_labels=self.num_labels, bos_token_id=self.bos_token_id, )
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Tuple:
"""simple docstring"""
_lowercase : List[Any] = XLMModel(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : str = model(lowerCamelCase, lengths=lowerCamelCase, langs=lowerCamelCase)
_lowercase : int = model(lowerCamelCase, langs=lowerCamelCase)
_lowercase : Any = model(lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[Any]:
"""simple docstring"""
_lowercase : Dict = XLMWithLMHeadModel(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> str:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnsweringSimple(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Dict = model(lowerCamelCase)
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
_lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMForQuestionAnswering(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = model(lowerCamelCase)
_lowercase : List[Any] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, p_mask=lowerCamelCase, )
_lowercase : List[str] = model(
lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase, cls_index=lowerCamelCase, is_impossible=lowerCamelCase, )
((_lowercase) , ) : Optional[Any] = result_with_labels.to_tuple()
_lowercase : List[str] = model(lowerCamelCase, start_positions=lowerCamelCase, end_positions=lowerCamelCase)
((_lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape, ())
self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> int:
"""simple docstring"""
_lowercase : Optional[Any] = XLMForSequenceClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[int] = model(lowerCamelCase)
_lowercase : Optional[int] = model(lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.loss.shape, ())
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> List[str]:
"""simple docstring"""
_lowercase : Any = self.num_labels
_lowercase : str = XLMForTokenClassification(lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : int = model(lowerCamelCase, attention_mask=lowerCamelCase, labels=lowerCamelCase)
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, ) -> Dict:
"""simple docstring"""
_lowercase : Optional[Any] = self.num_choices
_lowercase : Optional[int] = XLMForMultipleChoice(config=lowerCamelCase)
model.to(lowerCamelCase)
model.eval()
_lowercase : Optional[Any] = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : int = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : Optional[Any] = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
_lowercase : List[str] = model(
lowerCamelCase, attention_mask=lowerCamelCase, token_type_ids=lowerCamelCase, labels=lowerCamelCase, )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
_lowercase : Dict = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Optional[Any] = config_and_inputs
_lowercase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class _lowerCamelCase( _a, _a, _a, unittest.TestCase ):
lowercase_ : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
lowercase_ : Optional[int] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
lowercase_ : Union[str, Any] = (
{
"""feature-extraction""": XLMModel,
"""fill-mask""": XLMWithLMHeadModel,
"""question-answering""": XLMForQuestionAnsweringSimple,
"""text-classification""": XLMForSequenceClassification,
"""text-generation""": XLMWithLMHeadModel,
"""token-classification""": XLMForTokenClassification,
"""zero-shot""": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase) -> Optional[int]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase=False) -> Optional[int]:
"""simple docstring"""
_lowercase : Any = super()._prepare_for_class(lowerCamelCase, lowerCamelCase, return_labels=lowerCamelCase)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
_lowercase : Any = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
_lowercase : Dict = torch.zeros(
self.model_tester.batch_size, dtype=torch.long, device=lowerCamelCase)
return inputs_dict
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : Union[str, Any] = XLMModelTester(self)
_lowercase : List[str] = ConfigTester(self, config_class=lowerCamelCase, emb_dim=37)
def UpperCamelCase ( self) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase ( self) -> List[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*lowerCamelCase)
def UpperCamelCase ( self) -> Optional[Any]:
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> int:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*lowerCamelCase)
def UpperCamelCase ( self) -> List[str]:
"""simple docstring"""
_lowercase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Dict:
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*lowerCamelCase)
def UpperCamelCase ( self) -> Any:
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*lowerCamelCase)
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> int:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_attentions in attentions], [True] * len(lowerCamelCase))
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : Dict = min_length + idx + 1
_lowercase : int = min_length + idx + 1
_lowercase : Dict = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(lowerCamelCase))
def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase=False, lowerCamelCase=1) -> List[Any]:
"""simple docstring"""
self.assertIsInstance(lowerCamelCase, lowerCamelCase)
self.assertListEqual(
[isinstance(lowerCamelCase, lowerCamelCase) for iter_hidden_states in hidden_states], [True] * len(lowerCamelCase), )
self.assertEqual(len(lowerCamelCase), (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(lowerCamelCase):
# adds PAD dummy token
_lowercase : int = min_length + idx + 1
_lowercase : int = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(lowerCamelCase), )
pass
@slow
def UpperCamelCase ( self) -> int:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Dict = XLMModel.from_pretrained(lowerCamelCase)
self.assertIsNotNone(lowerCamelCase)
@require_torch
class _lowerCamelCase( unittest.TestCase ):
@slow
def UpperCamelCase ( self) -> Union[str, Any]:
"""simple docstring"""
_lowercase : Tuple = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(lowerCamelCase)
_lowercase : Union[str, Any] = torch.tensor([[14, 4_47]], dtype=torch.long, device=lowerCamelCase) # the president
_lowercase : Any = [
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
14,
4_47,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
_lowercase : str = model.generate(lowerCamelCase, do_sample=lowerCamelCase)
self.assertListEqual(output_ids[0].cpu().numpy().tolist(), lowerCamelCase)
| 89
| 1
|
'''simple docstring'''
from .glue import GlueDataset, GlueDataTrainingArguments
from .language_modeling import (
LineByLineTextDataset,
LineByLineWithRefDataset,
LineByLineWithSOPTextDataset,
TextDataset,
TextDatasetForNextSentencePrediction,
)
from .squad import SquadDataset, SquadDataTrainingArguments
| 581
|
'''simple docstring'''
from collections import deque
from math import floor
from random import random
from time import time
class a__:
def __init__( self ) -> List[Any]:
snake_case__ ={}
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ) -> Tuple:
if self.graph.get(_UpperCAmelCase ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
snake_case__ =[[w, v]]
if not self.graph.get(_UpperCAmelCase ):
snake_case__ =[]
def _lowercase ( self ) -> Optional[int]:
return list(self.graph )
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ) -> int:
if s == d:
return []
snake_case__ =[]
snake_case__ =[]
if s == -2:
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def _lowercase ( self , _UpperCAmelCase=-1 ) -> Optional[int]:
if c == -1:
snake_case__ =floor(random() * 1_0000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case__ =floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def _lowercase ( self , _UpperCAmelCase=-2 ) -> Optional[Any]:
snake_case__ =deque()
snake_case__ =[]
if s == -2:
snake_case__ =list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
snake_case__ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _lowercase ( self , _UpperCAmelCase ) -> List[str]:
snake_case__ =0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _lowercase ( self , _UpperCAmelCase ) -> Optional[int]:
return len(self.graph[u] )
def _lowercase ( self , _UpperCAmelCase=-2 ) -> Dict:
snake_case__ =[]
snake_case__ =[]
if s == -2:
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =s
snake_case__ =[]
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return sorted_nodes
def _lowercase ( self ) -> Optional[int]:
snake_case__ =[]
snake_case__ =[]
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =-2
snake_case__ =[]
snake_case__ =s
snake_case__ =False
snake_case__ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ =len(_UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ =True
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =False
indirect_parents.append(_UpperCAmelCase )
snake_case__ =s
snake_case__ =ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def _lowercase ( self ) -> Optional[int]:
snake_case__ =[]
snake_case__ =[]
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =-2
snake_case__ =[]
snake_case__ =s
snake_case__ =False
snake_case__ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ =len(_UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ =True
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =False
indirect_parents.append(_UpperCAmelCase )
snake_case__ =s
snake_case__ =ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
def _lowercase ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ) -> str:
snake_case__ =time()
self.dfs(_UpperCAmelCase , _UpperCAmelCase )
snake_case__ =time()
return end - begin
def _lowercase ( self , _UpperCAmelCase=-2 ) -> Any:
snake_case__ =time()
self.bfs(_UpperCAmelCase )
snake_case__ =time()
return end - begin
class a__:
def __init__( self ) -> Tuple:
snake_case__ ={}
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=1 ) -> int:
# check if the u exists
if self.graph.get(_UpperCAmelCase ):
# if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
snake_case__ =[[w, v]]
# add the other way
if self.graph.get(_UpperCAmelCase ):
# if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if v does not exist
snake_case__ =[[w, u]]
def _lowercase ( self , _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(_UpperCAmelCase )
# the other way round
if self.graph.get(_UpperCAmelCase ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(_UpperCAmelCase )
def _lowercase ( self , _UpperCAmelCase=-2 , _UpperCAmelCase=-1 ) -> int:
if s == d:
return []
snake_case__ =[]
snake_case__ =[]
if s == -2:
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(_UpperCAmelCase )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return visited
def _lowercase ( self , _UpperCAmelCase=-1 ) -> Dict:
if c == -1:
snake_case__ =floor(random() * 1_0000 ) + 10
for i in range(_UpperCAmelCase ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
snake_case__ =floor(random() * c ) + 1
if n != i:
self.add_pair(_UpperCAmelCase , _UpperCAmelCase , 1 )
def _lowercase ( self , _UpperCAmelCase=-2 ) -> List[Any]:
snake_case__ =deque()
snake_case__ =[]
if s == -2:
snake_case__ =list(self.graph )[0]
d.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
while d:
snake_case__ =d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _lowercase ( self , _UpperCAmelCase ) -> str:
return len(self.graph[u] )
def _lowercase ( self ) -> Any:
snake_case__ =[]
snake_case__ =[]
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =-2
snake_case__ =[]
snake_case__ =s
snake_case__ =False
snake_case__ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ =len(_UpperCAmelCase ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ =True
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =False
indirect_parents.append(_UpperCAmelCase )
snake_case__ =s
snake_case__ =ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return list(_UpperCAmelCase )
def _lowercase ( self ) -> List[str]:
snake_case__ =[]
snake_case__ =[]
snake_case__ =list(self.graph )[0]
stack.append(_UpperCAmelCase )
visited.append(_UpperCAmelCase )
snake_case__ =-2
snake_case__ =[]
snake_case__ =s
snake_case__ =False
snake_case__ =set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
snake_case__ =s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
snake_case__ =len(_UpperCAmelCase ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
snake_case__ =node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
snake_case__ =True
if len(_UpperCAmelCase ) != 0:
snake_case__ =stack[len(_UpperCAmelCase ) - 1]
else:
snake_case__ =False
indirect_parents.append(_UpperCAmelCase )
snake_case__ =s
snake_case__ =ss
# check if we have reached the starting point
if len(_UpperCAmelCase ) == 0:
return False
    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
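# A minimal usage sketch for the traversal helpers above. Hedged: the class
# name `Graph` is an assumption (the class statement sits earlier in this
# file), and `add_pair(u, v, w)` is the edge-insertion method that
# `fill_graph_randomly` above relies on.
#
#   g = Graph()
#   g.add_pair(0, 1, 1)
#   g.add_pair(1, 2, 1)
#   g.add_pair(2, 0, 1)
#   print(g.dfs())          # e.g. [0, 1, 2]
#   print(g.bfs())          # e.g. [0, 1, 2]
#   print(g.has_cycle())    # True -- the edge back to 0 closes a cycle
#   print(g.cycle_nodes())  # the vertices on that cycle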
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    pass
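# A short, hedged usage sketch of the greedy knapsack above (inputs invented
# for illustration; `Things.get_value` is the sort key, so items are taken in
# descending value while they still fit under `max_cost`):
#
#   foods = ["burger", "salad", "pizza"]
#   values = [80, 30, 100]
#   weights = [40, 10, 70]
#   menu = build_menu(foods, values, weights)
#   chosen, total = greedy(menu, 100, Things.get_value)
#   # pizza (100, 70) is taken first, burger no longer fits, salad does,
#   # so total == 130.0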
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import json
import os
import torch
from diffusers import UNet1DModel
os.makedirs("hub/hopper-medium-v2/unet/hor32", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/unet/hor128", exist_ok=True)
os.makedirs("hub/hopper-medium-v2/value_function", exist_ok=True)
def unet(hor):
    if hor == 128:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D")
    elif hor == 32:
        down_block_types = ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D")
        block_out_channels = (32, 64, 128, 256)
        up_block_types = ("UpResnetBlock1D", "UpResnetBlock1D", "UpResnetBlock1D")
    model = torch.load(f"/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch")
    state_dict = model.state_dict()
    config = {
        "down_block_types": down_block_types,
        "block_out_channels": block_out_channels,
        "up_block_types": up_block_types,
        "layers_per_block": 1,
        "use_timestep_embedding": True,
        "out_block_type": "OutConv1DBlock",
        "norm_num_groups": 8,
        "downsample_each_block": False,
        "in_channels": 14,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "sample_size": 65536,
        "mid_block_type": "MidResTemporalBlock1D",
        "act_fn": "mish",
    }
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(model.state_dict().keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), f"hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin")
    with open(f"hub/hopper-medium-v2/unet/hor{hor}/config.json", "w") as f:
        json.dump(config, f)
def value_function():
    config = {
        "in_channels": 14,
        "down_block_types": ("DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D", "DownResnetBlock1D"),
        "up_block_types": (),
        "out_block_type": "ValueFunction",
        "mid_block_type": "ValueFunctionMidBlock1D",
        "block_out_channels": (32, 64, 128, 256),
        "layers_per_block": 1,
        "downsample_each_block": True,
        "sample_size": 65536,
        "out_channels": 14,
        "extra_in_channels": 0,
        "time_embedding_type": "positional",
        "use_timestep_embedding": True,
        "flip_sin_to_cos": False,
        "freq_shift": 1,
        "norm_num_groups": 8,
        "act_fn": "mish",
    }
    model = torch.load("/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch")
    state_dict = model
    hf_value_function = UNet1DModel(**config)
    print(f"length of state dict: {len(state_dict.keys())}")
    print(f"length of value function dict: {len(hf_value_function.state_dict().keys())}")
    mapping = dict(zip(state_dict.keys(), hf_value_function.state_dict().keys()))
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k)
    hf_value_function.load_state_dict(state_dict)
    torch.save(hf_value_function.state_dict(), "hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin")
    with open("hub/hopper-medium-v2/value_function/config.json", "w") as f:
        json.dump(config, f)
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json",
    "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json",
}
class LukeConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "luke"

    def __init__(self, vocab_size=50267, entity_vocab_size=500000, hidden_size=768, entity_emb_size=256,
                 num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
                 type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_entity_aware_attention=True,
                 classifier_dropout=None, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    """simple docstring"""
    if not is_tqdm_available():
        raise ImportError("Accelerate's `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.")
    disable = False
    if main_process_only:
        # only the local main process should draw the bar; all other ranks disable it
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
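# Hedged usage sketch: in a distributed run only the local main process renders
# the progress bar; every other rank passes `disable=True` down to tqdm.  Note
# the signature quirk -- `main_process_only` is the first positional parameter,
# so the iterable must follow it (or be passed by keyword):
#
#   from accelerate.utils import tqdm
#   for batch in tqdm(True, dataloader):
#       ...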
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=128, max_relative_position=32,
                 type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3,
                 num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NezhaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_model_as_decoder(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        config.add_cross_attention = True
        model = NezhaModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_next_sequence_prediction(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForNextSentencePrediction(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 2))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForPreTraining(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
            next_sentence_label=sequence_labels,
        )
        self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NezhaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NezhaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NezhaForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': NezhaModel,
'''fill-mask''': NezhaForMaskedLM,
'''question-answering''': NezhaForQuestionAnswering,
'''text-classification''': NezhaForSequenceClassification,
'''token-classification''': NezhaForTokenClassification,
'''zero-shot''': NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["next_sentence_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = NezhaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NezhaConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_next_sequence_prediction(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_next_sequence_prediction(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NezhaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NezhaForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == NezhaForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "bert.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "bert.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @slow
    def test_inference_nezha_model(self):
        model = NezhaModel.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))

    @slow
    def test_inference_nezha_masked_lm(self):
        model = NezhaForMaskedLM.from_pretrained("sijunhe/nezha-cn-base")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        attention_mask = torch.tensor([[1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 6, 21128))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class TextInpainting(DiffusionPipeline):
    """simple docstring"""

    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        """simple docstring"""
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        """simple docstring"""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        """simple docstring"""
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        """simple docstring"""
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        """simple docstring"""
        # use CLIPSeg to produce a segmentation mask for `text` on top of `image`
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
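# Hedged usage sketch for the pipeline above, following the diffusers
# community-pipeline loading convention (model ids and the image path are
# illustrative only):
#
#   from diffusers import DiffusionPipeline
#   from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
#   from PIL import Image
#
#   processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   segmenter = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=segmenter,
#       segmentation_processor=processor,
#   )
#   image = Image.open("photo.png")
#   result = pipe(prompt="a bowl of fruit", image=image, text="the plate").images[0]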
"""simple docstring"""
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)

        steps += 1
    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)

        steps += 1
    return steps
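# Worked examples (checked by hand):
#   multiplicative_persistence(39)  -> 3   # 39 -> 27 -> 14 -> 4
#   additive_persistence(199)       -> 3   # 199 -> 19 -> 10 -> 1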
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class MidiProcessor(metaclass=DummyObject):
    """simple docstring"""

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """simple docstring"""
        requires_backends(cls, ["note_seq"])
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class TFLayoutLMModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
                 use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
                 range_bbox=1000):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # convert bbox to numpy since TF does not support item assignment
        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox).numpy()
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        bbox = tf.convert_to_tensor(bbox)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = LayoutLMConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMModel(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, bbox, token_type_ids=token_type_ids)
        result = model(input_ids, bbox)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForMaskedLM(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForSequenceClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFLayoutLMForTokenClassification(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFLayoutLMForQuestionAnswering(config=config)
        result = model(input_ids, bbox, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
@require_tf
class TFLayoutLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFLayoutLMModel,
'''fill-mask''': TFLayoutLMForMaskedLM,
'''text-classification''': TFLayoutLMForSequenceClassification,
'''token-classification''': TFLayoutLMForTokenClassification,
'''zero-shot''': TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = True
    onnx_min_opset = 10
    def setUp(self) -> None:
        self.model_tester = TFLayoutLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFLayoutLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip("Onnx compliancy broke with TF 2.10")
    def test_onnx_compliancy(self):
        pass
def prepare_layoutlm_batch_inputs():
'''simple docstring'''
    # fmt: off
    input_ids = tf.convert_to_tensor([[1_0_1,1_0_1_9,1_0_1_4,1_0_1_6,1_0_3_7,1_2_8_4_9,4_7_4_7,1_0_0_4,1_4_2_4_6,2_2_7_8,5_4_3_9,4_5_2_4,5_0_0_2,2_9_3_0,2_1_9_3,2_9_3_0,4_3_4_1,3_2_0_8,1_0_0_5,1_0_5_5,2_1_7_1,2_8_4_8,1_1_3_0_0,3_5_3_1,1_0_2],[1_0_1,4_0_7_0,4_0_3_4,7_0_2_0,1_0_2_4,3_0_5_8,1_0_1_5,1_0_1_3,2_8_6_1,1_0_1_3,6_0_7_0,1_9_2_7_4,2_7_7_2,6_2_0_5,2_7_8_1_4,1_6_1_4_7,1_6_1_4_7,4_3_4_3,2_0_4_7,1_0_2_8_3,1_0_9_6_9,1_4_3_8_9,1_0_1_2,2_3_3_8,1_0_2]]) # noqa: E231
    attention_mask = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],]) # noqa: E231
    bbox = tf.convert_to_tensor([[[0,0,0,0],[4_2_3,2_3_7,4_4_0,2_5_1],[4_2_7,2_7_2,4_4_1,2_8_7],[4_1_9,1_1_5,4_3_7,1_2_9],[9_6_1,8_8_5,9_9_2,9_1_2],[2_5_6,3_8,3_3_0,5_8],[2_5_6,3_8,3_3_0,5_8],[3_3_6,4_2,3_5_3,5_7],[3_6_0,3_9,4_0_1,5_6],[3_6_0,3_9,4_0_1,5_6],[4_1_1,3_9,4_7_1,5_9],[4_7_9,4_1,5_2_8,5_9],[5_3_3,3_9,6_3_0,6_0],[6_7,1_1_3,1_3_4,1_3_1],[1_4_1,1_1_5,2_0_9,1_3_2],[6_8,1_4_9,1_3_3,1_6_6],[1_4_1,1_4_9,1_8_7,1_6_4],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[1_9_5,1_4_8,2_8_7,1_6_5],[2_9_5,1_4_8,3_4_9,1_6_5],[4_4_1,1_4_9,4_9_2,1_6_6],[4_9_7,1_4_9,5_4_6,1_6_4],[6_4,2_0_1,1_2_5,2_1_8],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]],[[0,0,0,0],[6_6_2,1_5_0,7_5_4,1_6_6],[6_6_5,1_9_9,7_4_2,2_1_1],[5_1_9,2_1_3,5_5_4,2_2_8],[5_1_9,2_1_3,5_5_4,2_2_8],[1_3_4,4_3_3,1_8_7,4_5_4],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[1_3_0,4_6_7,2_0_4,4_8_0],[3_1_4,4_6_9,3_7_6,4_8_2],[5_0_4,6_8_4,5_8_2,7_0_6],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[9_4_1,8_2_5,9_7_3,9_0_0],[6_1_0,7_4_9,6_5_2,7_6_5],[1_3_0,6_5_9,1_6_8,6_7_2],[1_7_6,6_5_7,2_3_7,6_7_2],[2_3_8,6_5_7,3_1_2,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[4_4_3,6_5_3,6_2_8,6_7_2],[7_1_6,3_0_1,8_2_5,3_1_7],[1_0_0_0,1_0_0_0,1_0_0_0,1_0_0_0]]]) # noqa: E231
    token_type_ids = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]) # noqa: E231
# these are sequence labels (i.e. at the token level)
    labels = tf.convert_to_tensor([[-1_0_0,1_0,1_0,1_0,9,1,-1_0_0,7,7,-1_0_0,7,7,4,2,5,2,8,8,-1_0_0,-1_0_0,5,0,3,2,-1_0_0],[-1_0_0,1_2,1_2,1_2,-1_0_0,1_2,1_0,-1_0_0,-1_0_0,-1_0_0,-1_0_0,1_0,1_2,9,-1_0_0,-1_0_0,-1_0_0,1_0,1_0,1_0,9,1_2,-1_0_0,1_0,-1_0_0]]) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class TFLayoutLMModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
    @slow
    def test_forward_pass_no_head(self) -> str:
        model = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the sequence output on [0, :3, :3]
        expected_slice = tf.convert_to_tensor(
            [[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]],
        )
        self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-3))

        # test the pooled output on [1, :3]
        expected_slice = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552])
        self.assertTrue(np.allclose(outputs.pooler_output[1, :3], expected_slice, atol=1e-3))
    @slow
    def test_forward_pass_sequence_classification(self) -> Dict:
        model = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids,
            bbox=bbox,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            labels=tf.convert_to_tensor([1, 1]),
        )

        # test whether we get a loss as a scalar
        loss = outputs.loss
        expected_shape = (2,)
        self.assertEqual(loss.shape, expected_shape)

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = (2, 2)
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_token_classification(self) -> str:
        model = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=13)

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(
            input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids, labels=labels
        )

        # test the shape of the logits
        logits = outputs.logits
        expected_shape = tf.convert_to_tensor((2, 25, 13))
        self.assertEqual(logits.shape, expected_shape)
    @slow
    def test_forward_pass_question_answering(self) -> int:
        model = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased")

        input_ids, attention_mask, bbox, token_type_ids, labels = prepare_layoutlm_batch_inputs()

        # forward pass
        outputs = model(input_ids=input_ids, bbox=bbox, attention_mask=attention_mask, token_type_ids=token_type_ids)

        # test the shape of the logits
        expected_shape = tf.convert_to_tensor((2, 25))
        self.assertEqual(outputs.start_logits.shape, expected_shape)
        self.assertEqual(outputs.end_logits.shape, expected_shape)
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    """simple docstring"""

    @property
    def dummy_uncond_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        """simple docstring"""
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""

    def test_inference(self):
        """simple docstring"""
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_timesformer''': ['''TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TimesformerConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
'''TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimesformerModel''',
'''TimesformerForVideoClassification''',
'''TimesformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
def and_gate(input_1: int, input_2: int) -> int:
    """simple docstring"""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """simple docstring"""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
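# Hedged usage note: run as a script (the filename below is hypothetical):
#   python convert_unclip_txt2img_to_image_variation.py --dump_path ./karlo-image-variation
# The assembled UnCLIPImageVariationPipeline is written to --dump_path via
# save_pretrained above.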
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Glue(datasets.Metric):
    def _info(self):
if self.config_name not in [
"sst2",
"mnli",
"mnli_mismatched",
"mnli_matched",
"cola",
"stsb",
"mrpc",
"qqp",
"qnli",
"rte",
"wnli",
"hans",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
'''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''stsb''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                '''You should supply a configuration name selected in '''
                '''["sst2", "mnli", "mnli_mismatched", "mnli_matched", '''
                '''"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]''' )
| 471
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}


class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for a ConvNeXt V2 model."""

    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
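

if __name__ == "__main__":
    # Added illustration (not part of the original file): the default
    # configuration reproduces the ConvNeXt V2 "tiny" layout.
    config = ConvNextV2Config()
    assert config.hidden_sizes == [96, 192, 384, 768]
    assert config.depths == [3, 3, 9, 3]
    assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]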
| 383
|
def lucas_lehmer_test(p: int) -> bool:
    """Lucas-Lehmer primality test for the Mersenne number 2**p - 1."""
    if p < 2:
        raise ValueError("""p should not be less than 2!""")
    elif p == 2:
        return True

    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(11))
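    # Added sanity checks: 2**7 - 1 = 127 is a Mersenne prime, while
    # 2**11 - 1 = 2047 = 23 * 89 is composite.
    assert lucas_lehmer_test(7) is True
    assert lucas_lehmer_test(11) is False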
| 383
| 1
|
'''simple docstring'''
from __future__ import annotations
class Node:
    """A binary tree node holding integer data."""

    def __init__(self, data: int):
        self.data = data
        self.left: Node | None = None
        self.right: Node | None = None


def display(tree: Node | None) -> None:  # In Order traversal of the tree
    if tree:
        display(tree.left)
        print(tree.data)
        display(tree.right)


def depth_of_tree(tree: Node | None) -> int:
    return 1 + max(depth_of_tree(tree.left), depth_of_tree(tree.right)) if tree else 0


def is_full_binary_tree(tree: Node | None) -> bool:
    if not tree:
        return True
    if tree.left and tree.right:
        return is_full_binary_tree(tree.left) and is_full_binary_tree(tree.right)
    else:
        return not tree.left and not tree.right


def main() -> None:  # Main function for testing.
    # Build a sample tree (wiring reconstructed; the original only created the nodes).
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    tree.left.right.left = Node(6)
    tree.right.left = Node(7)
    tree.right.left.left = Node(8)
    tree.right.left.left.right = Node(9)

    print(is_full_binary_tree(tree))
    print(depth_of_tree(tree))
    print("Tree is: ")
    display(tree)


if __name__ == "__main__":
    main()
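    # Added minimal check: a perfect three-node tree is full and has depth 2.
    root = Node(1)
    root.left = Node(2)
    root.right = Node(3)
    assert is_full_binary_tree(root)
    assert depth_of_tree(root) == 2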
| 330
|
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp

    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count


outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted
mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print("The array is")
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    "No of Comparisons for 100 elements selected from a standard normal distribution "
    "is :"
)
print(z)
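# Added post-condition check: the array must be non-decreasing after the
# in-place sort. The comparison count itself varies between runs because the
# pivots are chosen at random.
assert all(M[i] <= M[i + 1] for i in range(len(M) - 1))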
| 350
| 0
|
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property):
    """Descriptor that caches the result of the decorated method on first access."""

    def __get__(self, obj, objtype=None):
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute")
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj, attr, None)
        if cached is None:
            cached = self.fget(obj)
            setattr(obj, attr, cached)
        return cached
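# Added usage note (illustrative, not part of the original module):
#
#     class Model:
#         @cached_property
#         def weights(self):
#             ...  # expensive computation
#
# The decorated body runs once per instance; the result is stored on the
# instance as "__cached_weights" and returned directly on later accesses.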
def strtobool(val):
    """Convert a string representation of truth to 1 (true) or 0 (false)."""
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(f"invalid truth value {val!r}")
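# Added examples (doctest style):
#
#     >>> strtobool("YES")
#     1
#     >>> strtobool("off")
#     0
#     >>> strtobool("maybe")
#     Traceback (most recent call last):
#         ...
#     ValueError: invalid truth value 'maybe'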
def is_tensor(x):
    """Tests if ``x`` is a torch/TF/Jax tensor or a NumPy array."""
    if is_torch_fx_proxy(x):
        return True
    if is_torch_available():
        import torch

        if isinstance(x, torch.Tensor):
            return True
    if is_tf_available():
        import tensorflow as tf

        if isinstance(x, tf.Tensor):
            return True

    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer

        if isinstance(x, (jnp.ndarray, Tracer)):
            return True

    return isinstance(x, np.ndarray)


def _is_numpy(x):
    return isinstance(x, np.ndarray)


def is_numpy_array(x):
    """Tests if ``x`` is a NumPy array or not."""
    return _is_numpy(x)


def _is_torch(x):
    import torch

    return isinstance(x, torch.Tensor)


def is_torch_tensor(x):
    """Tests if ``x`` is a torch tensor. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch(x)


def _is_torch_device(x):
    import torch

    return isinstance(x, torch.device)


def is_torch_device(x):
    """Tests if ``x`` is a torch device. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_device(x)


def _is_torch_dtype(x):
    import torch

    if isinstance(x, str):
        if hasattr(torch, x):
            x = getattr(torch, x)
        else:
            return False
    return isinstance(x, torch.dtype)


def is_torch_dtype(x):
    """Tests if ``x`` is a torch dtype. Safe to call even if torch is not installed."""
    return False if not is_torch_available() else _is_torch_dtype(x)


def _is_tensorflow(x):
    import tensorflow as tf

    return isinstance(x, tf.Tensor)


def is_tf_tensor(x):
    """Tests if ``x`` is a TensorFlow tensor. Safe to call even if TensorFlow is not installed."""
    return False if not is_tf_available() else _is_tensorflow(x)


def _is_tf_symbolic_tensor(x):
    import tensorflow as tf

    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf, "is_symbolic_tensor"):
        return tf.is_symbolic_tensor(x)
    return type(x) == tf.Tensor


def is_tf_symbolic_tensor(x):
    """Tests if ``x`` is a TF symbolic (non-eager) tensor. Safe without TensorFlow installed."""
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x)


def _is_jax(x):
    import jax.numpy as jnp  # noqa: F811

    return isinstance(x, jnp.ndarray)


def is_jax_tensor(x):
    """Tests if ``x`` is a Jax tensor. Safe to call even if jax is not installed."""
    return False if not is_flax_available() else _is_jax(x)


def to_py_obj(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, NumPy array or python list to a python list."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_py_obj(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return [to_py_obj(o) for o in obj]
    elif is_tf_tensor(obj):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj):
        return np.asarray(obj).tolist()
    elif isinstance(obj, (np.ndarray, np.number)):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj


def to_numpy(obj):
    """Convert a TensorFlow tensor, PyTorch tensor, NumPy array or python list to a NumPy array."""
    if isinstance(obj, (dict, UserDict)):
        return {k: to_numpy(v) for k, v in obj.items()}
    elif isinstance(obj, (list, tuple)):
        return np.array(obj)
    elif is_tf_tensor(obj):
        return obj.numpy()
    elif is_torch_tensor(obj):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj):
        return np.asarray(obj)
    else:
        return obj
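# Added examples (doctest style; only numpy is required):
#
#     >>> import numpy as np
#     >>> to_py_obj({"a": np.arange(3), "b": [np.float32(1.5)]})
#     {'a': [0, 1, 2], 'b': [1.5]}
#     >>> to_numpy([1, 2, 3]).shape
#     (3,)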
class ModelOutput(OrderedDict):
    """Base class for model outputs: a dataclass with dict- and tuple-like access."""

    def __post_init__(self):
        class_fields = fields(self)

        # Safety and consistency checks
        if not len(class_fields):
            raise ValueError(f"{self.__class__.__name__} has no fields.")
        if not all(field.default is None for field in class_fields[1:]):
            raise ValueError(f"{self.__class__.__name__} should not have more than one required field.")

        first_field = getattr(self, class_fields[0].name)
        other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])

        if other_fields_are_none and not is_tensor(first_field):
            if isinstance(first_field, dict):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field)
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False

            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator):
                    if (
                        not isinstance(element, (list, tuple))
                        or not len(element) == 2
                        or not isinstance(element[0], str)
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"Cannot set key/value for {element}. It needs to be a tuple (key, value)."
                            )
                        break
                    setattr(self, element[0], element[1])
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self, field.name)
                if v is not None:
                    self[field.name] = v

    def __delitem__(self, *args, **kwargs):
        raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.")

    def setdefault(self, *args, **kwargs):
        raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.")

    def pop(self, *args, **kwargs):
        raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.")

    def update(self, *args, **kwargs):
        raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.")

    def __getitem__(self, k):
        if isinstance(k, str):
            inner_dict = dict(self.items())
            return inner_dict[k]
        else:
            return self.to_tuple()[k]

    def __setattr__(self, name, value):
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name, value)
        super().__setattr__(name, value)

    def __setitem__(self, key, value):
        super().__setitem__(key, value)
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key, value)

    def to_tuple(self):
        """Convert self to a tuple containing all attributes/keys that are not ``None``."""
        return tuple(self[k] for k in self.keys())
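# Added usage sketch (illustrative): subclasses are dataclasses whose non-None
# fields are reachable as attributes, dict keys, and tuple entries alike.
#
#     from dataclasses import dataclass
#
#     @dataclass
#     class SampleOutput(ModelOutput):
#         loss: float = None
#         logits: list = None
#
#     out = SampleOutput(logits=[1, 2])
#     assert out.logits == out["logits"] == out.to_tuple()[0]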
class ExplicitEnum(str, Enum):
    """Enum with a more explicit error message for missing values."""

    @classmethod
    def _missing_(cls, value):
        raise ValueError(
            f"{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys())}"
        )


class PaddingStrategy(ExplicitEnum):
    LONGEST = "longest"
    MAX_LENGTH = "max_length"
    DO_NOT_PAD = "do_not_pad"


class TensorType(ExplicitEnum):
    PYTORCH = "pt"
    TENSORFLOW = "tf"
    NUMPY = "np"
    JAX = "jax"


class ContextManagers:
    """Wrapper around ``contextlib.ExitStack`` that enters a collection of context managers."""

    def __init__(self, context_managers):
        self.context_managers = context_managers
        self.stack = ExitStack()

    def __enter__(self):
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager)

    def __exit__(self, *args, **kwargs):
        self.stack.__exit__(*args, **kwargs)
def can_return_loss(model_class):
    """Check whether a model class exposes a ``return_loss`` argument that defaults to True."""
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True

    return False


def find_labels(model_class):
    """Find the label arguments used by a given model class."""
    model_name = model_class.__name__
    framework = infer_framework(model_class)
    if framework == "tf":
        signature = inspect.signature(model_class.call)  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward)  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__)  # Flax models

    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]


def flatten_dict(d, parent_key="", delimiter="."):
    """Flatten a nested dict into a single-level dict with delimiter-joined keys."""

    def _flatten_dict(d, parent_key="", delimiter="."):
        for k, v in d.items():
            key = str(parent_key) + delimiter + str(k) if parent_key else k
            if v and isinstance(v, MutableMapping):
                yield from flatten_dict(v, key, delimiter=delimiter).items()
            else:
                yield key, v

    return dict(_flatten_dict(d, parent_key, delimiter))
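# Added example (doctest style):
#
#     >>> flatten_dict({"a": {"b": 1, "c": {"d": 2}}, "e": 3})
#     {'a.b': 1, 'a.c.d': 2, 'e': 3}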
@contextmanager
def working_or_temp_dir(working_dir, use_temp_dir=False):
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array, axes=None):
    """Framework-agnostic version of ``numpy.transpose`` for NumPy/torch/TF/Jax arrays."""
    if is_numpy_array(array):
        return np.transpose(array, axes=axes)
    elif is_torch_tensor(array):
        return array.T if axes is None else array.permute(*axes)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.transpose(array, perm=axes)
    elif is_jax_tensor(array):
        return jnp.transpose(array, axes=axes)
    else:
        raise ValueError(f"Type not supported for transpose: {type(array)}.")


def reshape(array, newshape):
    """Framework-agnostic version of ``numpy.reshape``."""
    if is_numpy_array(array):
        return np.reshape(array, newshape)
    elif is_torch_tensor(array):
        return array.reshape(*newshape)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.reshape(array, newshape)
    elif is_jax_tensor(array):
        return jnp.reshape(array, newshape)
    else:
        raise ValueError(f"Type not supported for reshape: {type(array)}.")


def squeeze(array, axis=None):
    """Framework-agnostic version of ``numpy.squeeze``."""
    if is_numpy_array(array):
        return np.squeeze(array, axis=axis)
    elif is_torch_tensor(array):
        return array.squeeze() if axis is None else array.squeeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.squeeze(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.squeeze(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for squeeze: {type(array)}.")


def expand_dims(array, axis):
    """Framework-agnostic version of ``numpy.expand_dims``."""
    if is_numpy_array(array):
        return np.expand_dims(array, axis)
    elif is_torch_tensor(array):
        return array.unsqueeze(dim=axis)
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.expand_dims(array, axis=axis)
    elif is_jax_tensor(array):
        return jnp.expand_dims(array, axis=axis)
    else:
        raise ValueError(f"Type not supported for expand_dims: {type(array)}.")


def tensor_size(array):
    """Framework-agnostic version of ``numpy.size``."""
    if is_numpy_array(array):
        return np.size(array)
    elif is_torch_tensor(array):
        return array.numel()
    elif is_tf_tensor(array):
        import tensorflow as tf

        return tf.size(array)
    elif is_jax_tensor(array):
        return array.size
    else:
        raise ValueError(f"Type not supported for tensor_size: {type(array)}.")


def add_model_info_to_auto_map(auto_map, repo_id):
    """Adds the information of the repo_id to a given auto map."""
    for key, value in auto_map.items():
        if isinstance(value, (tuple, list)):
            auto_map[key] = [f"{repo_id}--{v}" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = f"{repo_id}--{value}"

    return auto_map


def infer_framework(model_class):
    """Infers the framework of a model class by walking its MRO and inspecting the defining modules."""
    for base_class in inspect.getmro(model_class):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow") or module.startswith("keras") or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch") or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax") or module.startswith("jax") or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(f"Could not infer framework from class {model_class}.")
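# Added usage note: for a PyTorch model class the MRO walk above eventually hits
# a torch module or `PreTrainedModel`, e.g. (requires torch installed):
#
#     >>> from transformers import BertModel
#     >>> infer_framework(BertModel)
#     'pt'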
| 717
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_trajectory_transformer""": [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TrajectoryTransformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"""TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrajectoryTransformerModel""",
"""TrajectoryTransformerPreTrainedModel""",
"""load_tf_weights_in_trajectory_transformer""",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
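# Added note: with the _LazyModule registration above, importing this package is
# cheap; the torch-backed symbols are only imported on first attribute access
# (the module path below is illustrative and may differ between library versions):
#
#     import transformers.models.deprecated.trajectory_transformer as tt
#     config = tt.TrajectoryTransformerConfig()  # triggers the real import here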
| 394
| 0
|
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)

    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])

        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)

    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict

    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
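# Added note on the arithmetic behind the two repartition tests above: with
# int64 rows (8 bytes each), the builder targets ceil(num_rows * 8 / max_shard_size)
# partitions, capped at the number of rows:
#
#     >>> import math
#     >>> math.ceil(100 * 8 / 16)
#     50
#     >>> min(math.ceil(100 * 8 / 1), 100)
#     100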
| 3
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_falcon': ['FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FalconConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_falcon"] = [
'FALCON_PRETRAINED_MODEL_ARCHIVE_LIST',
'FalconForCausalLM',
'FalconModel',
'FalconPreTrainedModel',
'FalconForSequenceClassification',
'FalconForTokenClassification',
'FalconForQuestionAnswering',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 3
| 1
|
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class SparkDatasetReader(AbstractDatasetReader):
    """Builds a (streaming or cached) dataset from a Spark DataFrame."""

    def __init__(
        self,
        df: pyspark.sql.DataFrame,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, **kwargs
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(df=df, features=features, cache_dir=cache_dir, working_dir=working_dir, **kwargs)

    def read(self):
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(download_mode=download_mode, file_format=self._file_format)
        return self.builder.as_dataset(split=self.split)
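# Added usage sketch (illustrative):
#
#     import pyspark
#
#     spark = pyspark.sql.SparkSession.builder.master("local[*]").getOrCreate()
#     df = spark.range(10)
#     ds = SparkDatasetReader(df, streaming=False).read()  # materialized Dataset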
| 718
|
'''simple docstring'''
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm1.weight''', F'''encoder.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm1.bias''', F'''encoder.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.weight''', F'''encoder.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.attn.proj.bias''', F'''encoder.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.norm2.weight''', F'''encoder.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.norm2.bias''', F'''encoder.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.weight''', F'''encoder.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc1.bias''', F'''encoder.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append(
(F'''encoder.deit.blocks.{i}.mlp.fc2.weight''', F'''encoder.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''encoder.deit.blocks.{i}.mlp.fc2.bias''', F'''encoder.encoder.layer.{i}.output.dense.bias''') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('''encoder.deit.cls_token''', '''encoder.embeddings.cls_token'''),
('''encoder.deit.pos_embed''', '''encoder.embeddings.position_embeddings'''),
('''encoder.deit.patch_embed.proj.weight''', '''encoder.embeddings.patch_embeddings.projection.weight'''),
('''encoder.deit.patch_embed.proj.bias''', '''encoder.embeddings.patch_embeddings.projection.bias'''),
('''encoder.deit.norm.weight''', '''encoder.layernorm.weight'''),
('''encoder.deit.norm.bias''', '''encoder.layernorm.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Copy/paste/tweak the original TrOCR weights into our VisionEncoderDecoderModel structure."""
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
    args = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
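# Added example invocation (the script filename below is illustrative):
#
#     python convert_trocr_checkpoint.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten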
| 355
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 91
|
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileViTVaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
            width_multiplier=self.width_multiplier,
            ffn_dropout=self.ffn_dropout,
            attn_dropout=self.attn_dropout,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTVaModel,
            "image-classification": MobileViTVaForImageClassification,
            "image-segmentation": MobileViTVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def _UpperCAmelCase ( self : Any ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _UpperCAmelCase ( self : Any ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)

            # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]),
                    [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor],
                )
                divisor *= 2

            self.assertEqual(self.model_tester.output_stride, divisor // 2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]],
                [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]],
                [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")
        model = model.to(torch_device)

        image_processor = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 333
| 0
|
__lowercase : Optional[int] ="""Input must be a string of 8 numbers plus letter"""
__lowercase : str ="""TRWAGMYFPDXBNJZSQVHLCKE"""
def a__ ( lowercase__ ):
'''simple docstring'''
if not isinstance(lowercase__ , lowercase__ ):
UpperCAmelCase_ =F'Expected string as input, found {type(lowercase__ ).__name__}'
raise TypeError(lowercase__ )
UpperCAmelCase_ =spanish_id.replace("-" , "" ).upper()
if len(lowercase__ ) != 9:
raise ValueError(lowercase__ )
try:
UpperCAmelCase_ =int(spanish_id_clean[0:8] )
UpperCAmelCase_ =spanish_id_clean[8]
except ValueError as ex:
raise ValueError(lowercase__ ) from ex
if letter.isdigit():
raise ValueError(lowercase__ )
return letter == LOOKUP_LETTERS[number % 2_3]
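

# Added examples (doctest style): the checksum letter is
# LOOKUP_LETTERS[number % 23].
#
#     >>> is_spain_national_id("12345678Z")  # 12345678 % 23 == 14 -> "Z"
#     True
#     >>> is_spain_national_id("12345678T")
#     False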
if __name__ == "__main__":
import doctest
doctest.testmod()
| 550
|
__lowercase : Dict ="""0.21.0"""
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
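# Added usage sketch for one of the exports above: find_executable_batch_size
# retries the wrapped function with smaller batch sizes after CUDA out-of-memory
# errors (the training body below is a placeholder).
#
#     from accelerate.utils import find_executable_batch_size
#
#     @find_executable_batch_size(starting_batch_size=128)
#     def train(batch_size):
#         ...  # on OOM, accelerate halves batch_size and calls train again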
| 550
| 1
|
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class DebertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "[UNK]",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "[UNK]"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_token_type_ids(self):
        tokenizer = self.get_tokenizer()
        tokd = tokenizer("Hello", "World")
        expected_token_type_ids = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
        self.assertListEqual(tokd["token_type_ids"], expected_token_type_ids)
@slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/deberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode
    @slow
    def test_tokenizer_integration(self):
        tokenizer_classes = [self.tokenizer_class]
        if self.test_rust_tokenizer:
            tokenizer_classes.append(self.rust_tokenizer_class)

        for tokenizer_class in tokenizer_classes:
            tokenizer = tokenizer_class.from_pretrained("microsoft/deberta-base")

            sequences = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            encoding = tokenizer(sequences, padding=True)
            decoded_sequences = [tokenizer.decode(seq, skip_special_tokens=True) for seq in encoding["input_ids"]]

            # fmt: off
            expected_encoding = {
                "input_ids": [
                    [1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
                ],
                "token_type_ids": [
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
                ],
                "attention_mask": [
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                    [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
                ]
            }
            # fmt: on

            expected_decoded_sequence = [
                "ALBERT: A Lite BERT for Self-supervised Learning of Language Representations",
                "ALBERT incorporates two parameter reduction techniques",
                "The first one is a factorized embedding parameterization. By decomposing the large vocabulary"
                " embedding matrix into two small matrices, we separate the size of the hidden layers from the size of"
                " vocabulary embedding.",
            ]

            self.assertDictEqual(encoding.data, expected_encoding)

            for expected, decoded in zip(expected_decoded_sequence, decoded_sequences):
                self.assertEqual(expected, decoded)
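
The `\u0120` character above is the byte-level BPE marker for a leading space (GPT-2 style). As a quick illustration of why "lower newer" splits into the nine tokens asserted in `test_full_tokenizer`, this hedged sketch builds a `DebertaTokenizer` directly from the same toy vocab/merges files; the temporary file paths are ours, not part of the original test.

# Hedged sketch: byte-level BPE marks word-initial spaces with \u0120.
import json
import os
import tempfile

from transformers import DebertaTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l",
         "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer",
         "\u0120wider", "[UNK]"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.json")
    merges_file = os.path.join(tmp, "merges.txt")
    with open(vocab_file, "w", encoding="utf-8") as fp:
        json.dump(dict(zip(vocab, range(len(vocab)))), fp)
    with open(merges_file, "w", encoding="utf-8") as fp:
        fp.write("\n".join(merges))

    tokenizer = DebertaTokenizer(vocab_file, merges_file, unk_token="[UNK]")
    print(tokenizer.tokenize("lower newer"))
    # expected: ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
    # " newer" keeps a bare "\u0120" because "\u0120 n" is not in the merges.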
| 48
|
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
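
A quick usage sketch for the config above (ours, not from the original file): the default `feed_forward_proj="gated-gelu"` sets `is_gated_act` and remaps the activation to `gelu_new`, and the `hidden_size`/`num_attention_heads` properties alias the BERT-style names onto T5-style ones.

# Hedged usage sketch for UMT5Config (illustration only).
from transformers import UMT5Config

config = UMT5Config()  # default feed_forward_proj="gated-gelu"
assert config.is_gated_act                        # "gated-" prefix detected
assert config.dense_act_fn == "gelu_new"          # "gated-gelu" is special-cased
assert config.hidden_size == config.d_model       # property alias
assert config.num_attention_heads == config.num_heads

relu_config = UMT5Config(feed_forward_proj="relu")
assert not relu_config.is_gated_act and relu_config.dense_act_fn == "relu"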
| 48
| 1
|
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
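
The `inputs_to_logits_ratio` property multiplies the convolutional strides together, giving the downsampling factor from raw waveform samples to encoder frames. A small sanity-check sketch (ours, not from the file):

# Hedged sketch: the feature extractor's total stride determines how many
# encoder frames a waveform produces.
import functools
import operator

conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
ratio = functools.reduce(operator.mul, conv_stride, 1)
print(ratio)  # 320: one encoder frame per 320 input samples

one_second_at_16khz = 16_000
print(one_second_at_16khz // ratio)  # 50 frames per second of audio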
| 712
|
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
    from .models import (
        AutoencoderKL,
        ControlNetModel,
        ModelMixin,
        PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
        VQModel,
    )
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
        KDPM2AncestralDiscreteScheduler,
        KDPM2DiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
        AltDiffusionImg2ImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
        IFImg2ImgPipeline,
        IFImg2ImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
        KandinskyImg2ImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
        KandinskyV22ControlnetImg2ImgPipeline,
        KandinskyV22ControlnetPipeline,
        KandinskyV22Img2ImgPipeline,
        KandinskyV22InpaintPipeline,
        KandinskyV22Pipeline,
        KandinskyV22PriorEmb2EmbPipeline,
        KandinskyV22PriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
        ShapEImg2ImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
        StableDiffusionControlNetImg2ImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
        StableDiffusionDepth2ImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
        StableDiffusionInstructPix2PixPipeline,
StableDiffusionLatentUpscalePipeline,
        StableDiffusionLDM3DPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
        StableDiffusionPix2PixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
        StableUnCLIPImg2ImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
    from .pipelines import StableDiffusionXLImg2ImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
        OnnxStableDiffusionImg2ImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
    from .models.unet_2d_condition_flax import FlaxUNet2DConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
        FlaxStableDiffusionImg2ImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
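
Every import block above follows the same guard: probe the optional backend, raise `OptionalDependencyNotAvailable` when it is missing, and fall back to dummy objects so that the import always succeeds and only attribute *use* fails with a helpful message. A self-contained sketch of that pattern (class and helper names are ours):

# Hedged sketch of the optional-dependency pattern used above.
import importlib.util


class OptionalDependencyNotAvailable(Exception):
    pass


def torch_is_available() -> bool:
    return importlib.util.find_spec("torch") is not None


class _DummyUNet2DModel:
    """Stand-in exported when torch is missing; raises only when instantiated."""

    def __init__(self, *args, **kwargs):
        raise ImportError("UNet2DModel requires the PyTorch backend: pip install torch")


try:
    if not torch_is_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    UNet2DModel = _DummyUNet2DModel  # import still succeeds; use fails loudly
else:
    from diffusers.models import UNet2DModel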
| 104
| 0
|
def binomial_coefficient(n: int, k: int) -> int:
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count: int) -> int:
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n: int) -> int:
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count: int) -> int:
    return catalan_number(node_count) * factorial(node_count)


if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
    if node_count <= 0:
        raise ValueError("We need some nodes to work with.")
    print(
        f"Given {node_count} nodes, there are {binary_tree_count(node_count)} "
        f"binary trees and {catalan_number(node_count)} binary search trees."
    )
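
A small worked check (ours): for 3 nodes, C(6, 3) = 20, so catalan_number(3) = 20 // 4 = 5 binary search tree shapes, and binary_tree_count(3) = 5 * 3! = 30 labeled binary trees.

# Hedged sanity check of the functions above for node_count = 3.
assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5           # 20 // (3 + 1)
assert factorial(3) == 6
assert binary_tree_count(3) == 30       # 5 Catalan shapes * 3! labelings
print("all checks passed")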
| 569
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
    # See all XGLM models at https://huggingface.co/models?filter=xglm
}


class XGLMConfig(PretrainedConfig):
    model_type = "xglm"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=256008,
        max_position_embeddings=2048,
        d_model=1024,
        ffn_dim=4096,
        num_layers=24,
        attention_heads=16,
        activation_function="gelu",
        dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        layerdrop=0.0,
        init_std=0.02,
        scale_embedding=True,
        use_cache=True,
        decoder_start_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
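
The `attribute_map` above lets generic code use BERT-style attribute names while XGLM stores fairseq-style ones; a brief illustration (ours, not from the file):

# Hedged sketch: attribute_map transparently aliases config attribute names.
from transformers import XGLMConfig

config = XGLMConfig()
# Generic code can ask for `hidden_size`; PretrainedConfig redirects to `d_model`.
assert config.hidden_size == config.d_model == 1024
assert config.num_attention_heads == config.attention_heads == 16
assert config.num_hidden_layers == config.num_layers == 24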
| 426
| 0
|
"""simple docstring"""
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b10_11_00_11_11_10_11_00_10_01_00_00_01_11_10_11_10_11_00_01_10_01_11_10
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]


class StableDiffusionXLWatermarker:
    def __init__(self):
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
        self.encoder.set_watermark("bits", self.watermark)

    def apply_watermark(self, images: torch.FloatTensor):
        # can't encode images that are smaller than 256
        if images.shape[-1] < 256:
            return images

        images = (255 * (images / 2 + 0.5)).cpu().permute(0, 2, 3, 1).float().numpy()
        images = [self.encoder.encode(image, "dwtDct") for image in images]
        images = torch.from_numpy(np.array(images)).permute(0, 3, 1, 2)
        images = torch.clamp(2 * (images / 255 - 0.5), min=-1.0, max=1.0)
        return images
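
Usage sketch (ours): the watermarker expects image tensors in [-1, 1], NCHW layout, and silently skips anything narrower than 256 px. The `imwatermark` module comes from the invisible-watermark package (`pip install invisible-watermark`).

# Hedged usage sketch for the watermarker above.
import torch

watermarker = StableDiffusionXLWatermarker()

small = torch.zeros(1, 3, 128, 128)          # below 256 px: returned unchanged
assert watermarker.apply_watermark(small) is small

batch = torch.rand(2, 3, 256, 256) * 2 - 1   # diffusion output range [-1, 1]
marked = watermarker.apply_watermark(batch)
assert marked.shape == batch.shape           # watermark is visually invisible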
| 712
|
"""simple docstring"""
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyV22PriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class KandinskyV22PriorPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22PriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModelWithProjection(config)

    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 12,
            "embedding_dim": self.text_embedder_hidden_size,
            "num_layers": 1,
        }
        model = PriorTransformer(**model_kwargs)
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape))
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config)
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type="fixed_small_log",
            prediction_type="sample",
            num_train_timesteps=1000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "scheduler": scheduler,
            "image_processor": image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "generator": generator,
            "guidance_scale": 4.0,
            "num_inference_steps": 2,
            "output_type": "np",
        }
        return inputs

    def test_kandinsky_prior(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.image_embeds
        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]

        assert image.shape == (1, 32)

        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        test_mean_pixel_difference = False

        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
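
For context, a hedged end-to-end sketch of what this prior does in practice: it maps a text prompt to CLIP image embeddings that a Kandinsky decoder then turns into pixels. Model ID and API reflect diffusers around the version pinned earlier in this corpus; treat it as illustrative, not authoritative.

# Hedged sketch: using the Kandinsky 2.2 prior to produce image embeddings.
import torch
from diffusers import KandinskyV22PriorPipeline

prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
)
prior.to("cuda")

out = prior("a photo of a horse", num_inference_steps=25, guidance_scale=4.0)
image_embeds, negative_image_embeds = out.image_embeds, out.negative_image_embeds
print(image_embeds.shape)  # (1, embedding_dim), consumed by the decoder pipeline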
| 63
| 0
|
'''simple docstring'''
import gc
import inspect
import unittest
import torch
from parameterized import parameterized
from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin
enable_full_determinism()
class PriorTransformerTests(ModelTesterMixin, unittest.TestCase):
    model_class = PriorTransformer
    main_input_name = "hidden_states"

    @property
    def dummy_input(self):
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = floats_tensor((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def get_dummy_seed_input(self, seed=0):
        torch.manual_seed(seed)
        batch_size = 4
        embedding_dim = 8
        num_embeddings = 7

        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def input_shape(self):
        return (4, 8)

    @property
    def output_shape(self):
        return (4, 8)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_from_pretrained_hub(self):
        model, loading_info = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=True
        )
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        hidden_states = model(**self.dummy_input)[0]

        assert hidden_states is not None, "Make sure output is not None"

    def test_forward_signature(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        signature = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        arg_names = [*signature.parameters.keys()]
        expected_arg_names = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], expected_arg_names)

    def test_output_pretrained(self):
        model = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        input = self.get_dummy_seed_input()

        with torch.no_grad():
            output = model(**input)[0]

        output_slice = output[0, :5].flatten().cpu()
        print(output_slice)

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        expected_output_slice = torch.tensor([-1.3436, -0.2870, 0.7538, 0.4368, -0.0239])
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))


@slow
class PriorTransformerIntegrationTests(unittest.TestCase):
    def get_dummy_seed_input(self, batch_size=1, embedding_dim=768, num_embeddings=77, seed=0):
        torch.manual_seed(seed)
        hidden_states = torch.randn((batch_size, embedding_dim)).to(torch_device)
        proj_embedding = torch.randn((batch_size, embedding_dim)).to(torch_device)
        encoder_hidden_states = torch.randn((batch_size, num_embeddings, embedding_dim)).to(torch_device)

        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.5861, 0.1283, -0.0931, 0.0882, 0.4476, 0.1329, -0.0498, 0.0640]],
            [37, [-0.4913, 0.0110, -0.0483, 0.0541, 0.4954, -0.0170, 0.0354, 0.1651]],
            # fmt: on
        ]
    )
    def test_kandinsky_prior(self, seed, expected_slice):
        model = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(torch_device)
        input = self.get_dummy_seed_input(seed=seed)

        with torch.no_grad():
            sample = model(**input)[0]

        assert list(sample.shape) == [1, 768]

        output_slice = sample[0, :8].flatten().cpu()
        print(output_slice)
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
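
A hedged standalone sketch of the forward signature these tests exercise: `PriorTransformer` denoises a CLIP image embedding (`hidden_states`) conditioned on a projected text embedding and per-token text encoder states. Shapes follow the tiny test config above.

# Hedged sketch: calling PriorTransformer directly with the tiny test config.
import torch
from diffusers import PriorTransformer

model = PriorTransformer(
    num_attention_heads=2, attention_head_dim=4, num_layers=2,
    embedding_dim=8, num_embeddings=7, additional_embeddings=4,
)

out = model(
    hidden_states=torch.randn(4, 8),             # noisy image embedding
    timestep=2,                                   # diffusion timestep
    proj_embedding=torch.randn(4, 8),             # projected text embedding
    encoder_hidden_states=torch.randn(4, 7, 8),   # per-token text states
)[0]
print(out.shape)  # torch.Size([4, 8]): predicted (denoised) image embedding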
| 614
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size="auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(predicted_ids, skip_special_tokens=True, normalize=True)[
            0
        ]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
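
Hedged usage sketch for a pipeline of this shape: it is a community-style pipeline, so the exact loading path depends on how it is registered; here we assemble it from public checkpoints via diffusers' `custom_pipeline` mechanism, which is an assumption on our part rather than documentation from the file itself.

# Hedged sketch: wiring the speech-to-image pipeline from public checkpoints.
import torch
from datasets import load_dataset
from diffusers import DiffusionPipeline
from transformers import WhisperForConditionalGeneration, WhisperProcessor

device = "cuda" if torch.cuda.is_available() else "cpu"

# A tiny public ASR sample; any 16 kHz speech array works.
audio_sample = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")[0]["audio"]

speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small").to(device)
speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="speech_to_image_diffusion",  # assumed registration name
    speech_model=speech_model,
    speech_processor=speech_processor,
).to(device)

image = pipe(audio_sample["array"], sampling_rate=audio_sample["sampling_rate"]).images[0]
image.save("spoken_prompt.png")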
| 635
| 0
|
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class TPULaunchTests(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
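
The test just shells out to `xla_spawn.py` with the current interpreter; a hedged sketch of the equivalent manual invocation using only the standard library (both paths are placeholders, not the real locations):

# Hedged sketch: what the test's subprocess call amounts to.
import os
import subprocess
import sys

cmd = [
    sys.executable,
    "path/to/xla_spawn.py",                      # placeholder launcher path
    "--num_cores", "8",
    "path/to/test_utils/scripts/test_script.py", # placeholder script path
]
subprocess.run(cmd, env=os.environ.copy(), check=True)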
| 703
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
        "FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FNetForMaskedLM",
        "FNetForMultipleChoice",
        "FNetForNextSentencePrediction",
        "FNetForPreTraining",
        "FNetForQuestionAnswering",
        "FNetForSequenceClassification",
        "FNetForTokenClassification",
        "FNetLayer",
        "FNetModel",
        "FNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
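
The `_LazyModule` swap at the bottom is what makes importing this package cheap: nothing heavy loads until an attribute is actually touched. A minimal standalone sketch of the same idea (class and attribute names are ours, not the transformers implementation):

# Hedged sketch of a lazy module: defer submodule imports until attribute access.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map exported attribute -> submodule that actually defines it
        self._attr_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        submodule_name = self._attr_to_submodule.get(attr)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{submodule_name}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so the import runs only once
        return value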
| 698
| 0
|