'''Detect whether a directed graph contains a cycle.'''


def check_cycle(graph: dict) -> bool:
    '''Return True if the directed graph contains a cycle, else False.'''
    # Keep track of all visited nodes
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    '''Return True if a back edge is reachable from ``vertex``.'''
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)
    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True
    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False
if __name__ == "__main__":
from doctest import testmod
testmod()
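    # A minimal usage sketch: graphs are adjacency-list dicts. The first graph
    # contains the cycle 0 -> 1 -> 2 -> 0; the second is acyclic.
    print(check_cycle({0: [1], 1: [2], 2: [0]}))  # True
    print(check_cycle({0: [1], 1: [2], 2: []}))  # False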
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "j")
        self.assertEqual(len(vocab_keys), 1000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]
                # Simple input tests: the tokenizer has no padding token, so padding must raise
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")
                # Simple input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    s2,
                    max_length=max_length,
                    padding="max_length",
                )
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")
                # Pair input
                self.assertRaises(
                    ValueError,
                    tokenizer_r.batch_encode_plus,
                    p2,
                    max_length=max_length,
                    padding="max_length",
                )

    # The tokenizer has no padding token, so this common test does not apply
    def test_padding_different_model_input_name(self):
        pass

    def test_full_tokenizer(self):
        tokenizer = ReformerTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [285, 46, 10, 170, 382],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4],
        )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [126, 32, 262, 152, 38, 72, 287]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            "This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
            " add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
        )
        original_tokenizer_encodings = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))
    @require_torch
    @slow
    def test_torch_encode_plus_sent_to_model(self):
        import torch

        from transformers import ReformerConfig, ReformerModel

        # Build sequence
        first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10]
        sequence = " ".join(first_ten_tokens)
        encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt")
        batch_encoded_sequence = self.big_tokenizer.batch_encode_plus([sequence, sequence], return_tensors="pt")
        config = ReformerConfig()
        # The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
        config.axial_pos_shape = encoded_sequence["input_ids"].shape
        model = ReformerModel(config)
        # Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
        assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
        with torch.no_grad():
            model(**encoded_sequence)
            model(**batch_encoded_sequence)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
        sequences = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="google/reformer-crime-and-punishment",
            revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a",
            padding=False,
            sequences=sequences,
        )
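
# A minimal usage sketch outside the test suite: load the pretrained
# SentencePiece tokenizer exercised above and round-trip a string.
#   tok = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#   ids = tok.encode("Hello World!")  # -> [126, 32, 262, 152, 38, 72, 287], as asserted above
#   tok.decode(ids)                   # -> "Hello World!"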
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger()


def convert_weight_and_push(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True):
    print(f"""Converting {name}...""")

    with torch.no_grad():
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model('levit_128s', pretrained=True)
            else:
                from_model = timm.create_model('levit_128', pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model('levit_192', pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model('levit_256', pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model('levit_384', pretrained=True)

        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        x = torch.randn((2, 3, 224, 224))
        out1 = from_model(x)
        out2 = our_model(x).logits
        assert torch.allclose(out1, out2), "The model logits don't match the original one."

    checkpoint_name = name
    print(checkpoint_name)

    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(f"""Pushed {checkpoint_name}""")


def convert_weights_and_push(save_directory: Path, model_name: str = None, push_to_hub: bool = True):
    filename = 'imagenet-1k-id2label.json'
    num_labels = 1000
    expected_shape = (1, num_labels)

    repo_id = 'huggingface/label-files'
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='dataset'), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=id2label, label2id=label2id)

    names_to_hidden_sizes = {
        'levit-128S': 128,
        'levit-128': 128,
        'levit-192': 192,
        'levit-256': 256,
        'levit-384': 384,
    }

    names_to_config = {
        'levit-128S': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-128': ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        'levit-192': ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-256': ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        'levit-384': ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }

    if model_name:
        convert_weight_and_push(
            names_to_hidden_sizes[model_name], model_name, names_to_config[model_name], save_directory, push_to_hub
        )
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default=None,
type=str,
help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='levit-dump-folder/',
type=Path,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
parser.add_argument(
'--no-push_to_hub',
dest='push_to_hub',
action='store_false',
help='Do not push model and image processor to the hub',
)
    args = parser.parse_args()
    pytorch_dump_folder_path: Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
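
# Example invocation (a sketch; the script file name is an assumption):
#   python convert_levit_timm_to_pytorch.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub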
'''Constant stretch (histogram equalization) of a grayscale image.'''
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow('Output-Image', self.img)
        cv2.imshow('Input-Image', self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), 'image_data/input.jpg')
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
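
# For reference: the mapping built in `stretch` is standard histogram
# equalization. With p(r_k) = n_k / N the intensity histogram, each grey
# level r_k is mapped to round((L - 1) * sum_{j <= k} p(r_j)), i.e. the
# scaled cumulative distribution, which spreads the levels over [0, L - 1].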
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    UNet3DConditionModel,
    VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class VideoToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = VideoToVideoSDPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'video'}) - {"image", "width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'video'}) - {"image"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    test_attention_slicing = False

    # No `output_type`.
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32,
            attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32), rng=random.Random(seed)).to(device)

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "video": video,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = VideoToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (32, 32, 3)
        expected_slice = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=5e-3)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class VideoToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_two_step_model(self):
        pipe = VideoToVideoSDPipeline.from_pretrained("cerspense/zeroscope_v2_XL", torch_dtype=torch.float16)
        pipe.enable_model_cpu_offload()

        # 10 frames
        generator = torch.Generator(device="cpu").manual_seed(0)
        video = torch.randn((1, 10, 3, 1024, 576), generator=generator)
        video = video.to("cuda")

        prompt = "Spiderman is surfing"

        video_frames = pipe(prompt, video=video, generator=generator, num_inference_steps=3, output_type="pt").frames
        expected_array = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656])
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array).sum() < 1e-2
'''simple docstring'''
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index, data):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    """Test the basic functionality of the linked list."""
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    """Test the linked list with a variety of data types."""
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f"Element at Position 1: {linked_list[1]}")
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f"length of linked_list is : {len(linked_list)}")


if __name__ == "__main__":
    main()
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCAmelCase_ = False, False, False
@dataclass
class Audio:
    """Audio ``Feature`` to extract audio data from an audio file."""

    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}."
            )

    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. '
            )
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                use_auth_token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                use_auth_token = None
            with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}

    def flatten(self):
        """If in the decodable state, raise an error, otherwise flatten the feature into a dictionary."""
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
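
# A minimal usage sketch (standard `datasets` usage; the path is hypothetical):
#   from datasets import Dataset, Audio
#   ds = Dataset.from_dict({"audio": ["path/to/sample.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # -> {"path": ..., "array": np.ndarray, "sampling_rate": 16000}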
"""simple docstring"""
import numpy as np
import torch
import tqdm

from ...models.unet_1d import UNet1DModel
from ...pipelines import DiffusionPipeline
from ...utils import randn_tensor
from ...utils.dummy_pt_objects import DDPMScheduler


class ValueGuidedRLPipeline(DiffusionPipeline):
    def __init__(
        self,
        value_function: UNet1DModel,
        unet: UNet1DModel,
        scheduler: DDPMScheduler,
        env,
    ):
        super().__init__()
        self.value_function = value_function
        self.unet = unet
        self.scheduler = scheduler
        self.env = env
        self.data = env.get_dataset()
        self.means = {}
        for key in self.data.keys():
            try:
                self.means[key] = self.data[key].mean()
            except:  # noqa: E722
                pass
        self.stds = {}
        for key in self.data.keys():
            try:
                self.stds[key] = self.data[key].std()
            except:  # noqa: E722
                pass
        self.state_dim = env.observation_space.shape[0]
        self.action_dim = env.action_space.shape[0]

    def normalize(self, x_in, key):
        return (x_in - self.means[key]) / self.stds[key]

    def de_normalize(self, x_in, key):
        return x_in * self.stds[key] + self.means[key]

    def to_torch(self, x_in):
        if type(x_in) is dict:
            return {k: self.to_torch(v) for k, v in x_in.items()}
        elif torch.is_tensor(x_in):
            return x_in.to(self.unet.device)
        return torch.tensor(x_in, device=self.unet.device)

    def reset_x0(self, x_in, cond, act_dim):
        for key, val in cond.items():
            x_in[:, key, act_dim:] = val.clone()
        return x_in

    def run_diffusion(self, x, conditions, n_guide_steps, scale):
        batch_size = x.shape[0]
        y = None
        for i in tqdm.tqdm(self.scheduler.timesteps):
            # create batch of timesteps to pass into model
            timesteps = torch.full((batch_size,), i, device=self.unet.device, dtype=torch.long)
            for _ in range(n_guide_steps):
                with torch.enable_grad():
                    x.requires_grad_()
                    # permute to match dimension for pre-trained models
                    y = self.value_function(x.permute(0, 2, 1), timesteps).sample
                    grad = torch.autograd.grad([y.sum()], [x])[0]
                    posterior_variance = self.scheduler._get_variance(i)
                    model_std = torch.exp(0.5 * posterior_variance)
                    grad = model_std * grad
                grad[timesteps < 2] = 0
                x = x.detach()
                x = x + scale * grad
                x = self.reset_x0(x, conditions, self.action_dim)
            prev_x = self.unet(x.permute(0, 2, 1), timesteps).sample.permute(0, 2, 1)
            # TODO: verify deprecation of this kwarg
            x = self.scheduler.step(prev_x, i, x, predict_epsilon=False)["prev_sample"]
            # apply conditions to the trajectory (set the initial state)
            x = self.reset_x0(x, conditions, self.action_dim)
            x = self.to_torch(x)
        return x, y

    def __call__(self, obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1):
        # normalize the observations and create batch dimension
        obs = self.normalize(obs, "observations")
        obs = obs[None].repeat(batch_size, axis=0)
        conditions = {0: self.to_torch(obs)}
        shape = (batch_size, planning_horizon, self.state_dim + self.action_dim)
        # generate initial noise and apply our conditions (to make the trajectories start at current state)
        x1 = randn_tensor(shape, device=self.unet.device)
        x = self.reset_x0(x1, conditions, self.action_dim)
        x = self.to_torch(x)
        # run the diffusion process
        x, y = self.run_diffusion(x, conditions, n_guide_steps, scale)
        # sort output trajectories by value
        sorted_idx = y.argsort(0, descending=True).squeeze()
        sorted_values = x[sorted_idx]
        actions = sorted_values[:, :, : self.action_dim]
        actions = actions.detach().cpu().numpy()
        denorm_actions = self.de_normalize(actions, key="actions")
        # select the action with the highest value
        if y is not None:
            selected_index = 0
        else:
            # if we didn't run value guiding, select a random action
            selected_index = np.random.randint(0, batch_size)
        denorm_actions = denorm_actions[selected_index, 0]
        return denorm_actions
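
# A minimal usage sketch (hypothetical objects; requires a d4rl-style `env`
# plus pretrained value-function and diffusion UNets):
#   pipeline = ValueGuidedRLPipeline(value_function, unet, scheduler, env)
#   obs = env.reset()
#   action = pipeline(obs, batch_size=64, planning_horizon=32, n_guide_steps=2, scale=0.1)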
'''simple docstring'''
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_roberta_prelayernorm_checkpoint_to_pytorch(checkpoint_repo: str, pytorch_dump_folder_path: str):
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint-repo""",
default=None,
type=str,
required=True,
help="""Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
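
# Example invocation (a sketch; the script file name is an assumption, the
# checkpoint repo is the one mentioned in the help text above):
#   python convert_roberta_prelayernorm_checkpoint_to_pytorch.py \
#       --checkpoint-repo andreasmadsen/efficient_mlm_m0.40 \
#       --pytorch_dump_folder_path ./roberta_prelayernorm_dump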
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
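
# For reference: in this toy BPE vocabulary "@@" marks a non-final subword
# piece, so "react" (which has no merge path to a full word) splits into
# "re@@ a@@ c@@ t", while "adapt" stays whole via the "ad apt</w>" merge,
# exactly as asserted in test_full_tokenizer above.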
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models'
        )
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir'
        )
        download_parser.add_argument(
            '--trust-remote-code',
            action='store_true',
            help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine',
        )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
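
# Example CLI usage (a sketch; the subcommand is registered as `download` above):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force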
'''simple docstring'''
from ...processing_utils import ProcessorMixin
class SpeechT5Processor(ProcessorMixin):
    """Constructs a SpeechT5 processor which wraps a feature extractor and a tokenizer into a single processor."""

    feature_extractor_class = "SpeechT5FeatureExtractor"
    tokenizer_class = "SpeechT5Tokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)

    def __call__(self, *args, **kwargs):
        audio = kwargs.pop('audio', None)
        text = kwargs.pop('text', None)
        text_target = kwargs.pop('text_target', None)
        audio_target = kwargs.pop('audio_target', None)
        sampling_rate = kwargs.pop('sampling_rate', None)

        if audio is not None and text is not None:
            raise ValueError(
                'Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?'
            )
        if audio_target is not None and text_target is not None:
            raise ValueError(
                'Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?'
            )
        if audio is None and audio_target is None and text is None and text_target is None:
            raise ValueError(
                'You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.'
            )

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        elif text is not None:
            inputs = self.tokenizer(text, **kwargs)
        else:
            inputs = None

        if audio_target is not None:
            targets = self.feature_extractor(audio_target=audio_target, *args, sampling_rate=sampling_rate, **kwargs)
            labels = targets['input_values']
        elif text_target is not None:
            targets = self.tokenizer(text_target, **kwargs)
            labels = targets['input_ids']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def pad(self, *args, **kwargs):
        input_values = kwargs.pop('input_values', None)
        input_ids = kwargs.pop('input_ids', None)
        labels = kwargs.pop('labels', None)

        if input_values is not None and input_ids is not None:
            raise ValueError('Cannot process both `input_values` and `input_ids` inputs.')
        if input_values is None and input_ids is None and labels is None:
            raise ValueError(
                'You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.'
            )

        if input_values is not None:
            inputs = self.feature_extractor.pad(input_values, *args, **kwargs)
        elif input_ids is not None:
            inputs = self.tokenizer.pad(input_ids, **kwargs)
        else:
            inputs = None

        if labels is not None:
            if "input_ids" in labels or (isinstance(labels, list) and "input_ids" in labels[0]):
                targets = self.tokenizer.pad(labels, **kwargs)
                labels = targets['input_ids']
            else:
                feature_size_hack = self.feature_extractor.feature_size
                self.feature_extractor.feature_size = self.feature_extractor.num_mel_bins
                targets = self.feature_extractor.pad(labels, *args, **kwargs)
                self.feature_extractor.feature_size = feature_size_hack
                labels = targets['input_values']
        else:
            targets = None

        if inputs is None:
            return targets

        if targets is not None:
            inputs['labels'] = labels
            decoder_attention_mask = targets.get('attention_mask')
            if decoder_attention_mask is not None:
                inputs['decoder_attention_mask'] = decoder_attention_mask

        return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
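
# A minimal usage sketch (the checkpoint name is an assumption):
#   processor = SpeechT5Processor.from_pretrained("microsoft/speecht5_tts")
#   inputs = processor(text="Hello, my dog is cute", return_tensors="pt")
#   inputs["input_ids"]  # token ids produced by the wrapped SpeechT5Tokenizer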
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://"" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
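
# A minimal usage sketch (hypothetical `DatasetInfo`; the filesystem is keyed
# to a single repo and is read-only):
#   fs = HfFileSystem(repo_info=dataset_info, token=None)
#   fs.ls("")                         # top-level files/directories of the repo
#   fs.open("data.csv", "rb").read()  # stream a file's raw bytes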
from __future__ import annotations

from collections.abc import MutableSequence


class Polynomial:
    def __init__(self, degree: int, coefficients: MutableSequence[float]) -> None:
        if len(coefficients) != degree + 1:
            raise ValueError(
                "The number of coefficients should be equal to the degree + 1."
            )
        self.coefficients: list[float] = list(coefficients)
        self.degree = degree

    def __add__(self, polynomial_2: Polynomial) -> Polynomial:
        if self.degree > polynomial_2.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_2.degree + 1):
                coefficients[i] += polynomial_2.coefficients[i]
            return Polynomial(self.degree, coefficients)
        else:
            coefficients = polynomial_2.coefficients[:]
            for i in range(self.degree + 1):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_2.degree, coefficients)

    def __sub__(self, polynomial_2: Polynomial) -> Polynomial:
        return self + polynomial_2 * Polynomial(0, [-1])

    def __neg__(self) -> Polynomial:
        return Polynomial(self.degree, [-c for c in self.coefficients])

    def __mul__(self, polynomial_2: Polynomial) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + polynomial_2.degree + 1)
        for i in range(self.degree + 1):
            for j in range(polynomial_2.degree + 1):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_2.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_2.degree, coefficients)

    def evaluate(self, substitution: int | float) -> int | float:
        result: int | float = 0
        for i in range(self.degree + 1):
            result += self.coefficients[i] * (substitution**i)
        return result

    def __str__(self) -> str:
        polynomial = ""
        for i in range(self.degree, -1, -1):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i]))
            elif i == 1:
                polynomial += str(abs(self.coefficients[i])) + "x"
            else:
                polynomial += str(abs(self.coefficients[i])) + "x^" + str(i)
        return polynomial

    def __repr__(self) -> str:
        return self.__str__()

    def derivative(self) -> Polynomial:
        coefficients: list[float] = [0] * self.degree
        for i in range(self.degree):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1, coefficients)

    def integral(self, constant: int | float = 0) -> Polynomial:
        coefficients: list[float] = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1, coefficients)

    def __eq__(self, polynomial_2: object) -> bool:
        if not isinstance(polynomial_2, Polynomial):
            return False
        if self.degree != polynomial_2.degree:
            return False
        for i in range(self.degree + 1):
            if self.coefficients[i] != polynomial_2.coefficients[i]:
                return False
        return True

    def __ne__(self, polynomial_2: object) -> bool:
        return not self.__eq__(polynomial_2)
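# A short worked example for the class above (added for illustration): build
# p(x) = 3x^2 + 2x + 1, then exercise arithmetic, the derivative and the integral.
if __name__ == "__main__":
    p = Polynomial(2, [1, 2, 3])  # coefficients are ordered low -> high degree
    q = Polynomial(1, [0, 1])     # q(x) = x
    print(p)                      # 3x^2 + 2x + 1
    print(p + q)                  # 3x^2 + 3x + 1
    print(p * q)                  # 3x^3 + 2x^2 + 1x
    print(p.derivative())         # 6x + 2
    print(p.integral())           # 1.0x^3 + 1.0x^2 + 1.0x
    assert p.evaluate(2) == 17    # 3*4 + 2*2 + 1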
import json
import os
from datetime import date
from pathlib import Path

from tabulate import DataRow, TableFormat, tabulate


hf_table_format = TableFormat(
    lineabove=None,
    linebelowheader=None,
    linebetweenrows=None,
    linebelow=None,
    headerrow=DataRow("", "|", "|"),
    datarow=DataRow("", "|", "|"),
    padding=1,
    with_header_hide=None,
)


failed = []
group_info = []
no_error_payload = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}

payload = [
    {
        "type": "header",
        "text": {
            "type": "plain_text",
            "text": f"🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results",
            "emoji": True,
        },
    }
]

total_num_failed = 0
for log in Path().glob("*.log"):
    section_num_failed = 0
    with open(log, "r") as f:
        for line in f:
            line = json.loads(line)
            if line.get("nodeid", "") != "":
                test = line["nodeid"]
                if line.get("duration", None) is not None:
                    duration = f"{line['duration']:.4f}"
                    if line.get("outcome", "") == "failed":
                        section_num_failed += 1
                        failed.append([test, duration, log.name.split("_")[0]])
                        total_num_failed += 1
    group_info.append([str(log), section_num_failed, failed])
    failed = []
    log.unlink()

message = ""
all_files2failed = []
if total_num_failed > 0:
    for name, num_failed, failed_tests in group_info:
        if num_failed > 0:
            if num_failed == 1:
                message += f"*{name[1:]}: {num_failed} failed test*\n"
            else:
                message += f"*{name[1:]}: {num_failed} failed tests*\n"
            failed_table = []
            files2failed = {}
            for test in failed_tests:
                data = test[0].split("::")
                data[0] = data[0].split("/")[-1]
                if data[0] not in files2failed:
                    files2failed[data[0]] = [data[1:]]
                else:
                    files2failed[data[0]] += [data[1:]]
                failed_table.append(data)

            files = [test[0] for test in failed_table]
            individual_files = list(set(files))
            # Count number of instances in failed_tests
            table = []
            for file in individual_files:
                table.append([file, len(files2failed[file])])

            failed_table = tabulate(
                table,
                headers=["Test Location", "Num Failed"],
                tablefmt=hf_table_format,
                stralign="right",
            )
            message += f"\n```\n{failed_table}\n```"
            all_files2failed.append(files2failed)
    if len(message) > 3000:
        err = "Too many failed tests, please see the full report in the Action results."
        offset = len(err) + 10
        message = message[: 3000 - offset] + f"\n...\n```\n{err}"
    print(f"### {message}")
else:
    message = "No failed tests! 🤗"
    print(f"## {message}")
    payload.append(no_error_payload)

if os.environ.get("TEST_TYPE", "") != "":
    from slack_sdk import WebClient

    client = WebClient(token=os.environ["SLACK_API_TOKEN"])
    if message != "No failed tests! 🤗":
        md_report = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": message,
            },
        }
        payload.append(md_report)
        action_button = {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": "*For more details:*",
            },
            "accessory": {
                "type": "button",
                "text": {
                    "type": "plain_text",
                    "text": "Check Action results",
                    "emoji": True,
                },
                "url": f"https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
        payload.append(action_button)
    date_report = {
        "type": "context",
        "elements": [
            {
                "type": "plain_text",
                "text": f"Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}",
            }
        ],
    }
    payload.append(date_report)
    response = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
    ts = response.data["ts"]
    for failed_file in all_files2failed:
        for test_location, test_failures in failed_file.items():
            # Keep only the first instance of the test name
            test_class = ""
            for i, row in enumerate(test_failures):
                if row[0] != test_class:
                    test_class = row[0]
                else:
                    row[0] = ""
            payload = {
                "type": "section",
                "text": {
                    "type": "mrkdwn",
                    "text": f"Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```",
                },
            }
            client.chat_postMessage(
                channel="#accelerate-ci-daily",
                thread_ts=ts,
                blocks=[payload],
            )
from typing import Any, Dict, List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    import torch

    from transformers.modeling_outputs import BaseModelOutput

    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotObjectDetectionPipeline(ChunkPipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.framework == "tf":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")

        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING)

    def __call__(
        self,
        image: Union[str, "Image.Image", List[Dict[str, Any]]],
        candidate_labels: Union[str, List[str]] = None,
        **kwargs,
    ):
        if "text_queries" in kwargs:
            candidate_labels = kwargs.pop("text_queries")

        if isinstance(image, (str, Image.Image)):
            inputs = {"image": image, "candidate_labels": candidate_labels}
        else:
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def _sanitize_parameters(self, **kwargs):
        postprocess_params = {}
        if "threshold" in kwargs:
            postprocess_params["threshold"] = kwargs["threshold"]
        if "top_k" in kwargs:
            postprocess_params["top_k"] = kwargs["top_k"]
        return {}, {}, postprocess_params

    def preprocess(self, inputs):
        image = load_image(inputs["image"])
        candidate_labels = inputs["candidate_labels"]
        if isinstance(candidate_labels, str):
            candidate_labels = candidate_labels.split(",")

        target_size = torch.tensor([[image.height, image.width]], dtype=torch.int32)
        for i, candidate_label in enumerate(candidate_labels):
            text_inputs = self.tokenizer(candidate_label, return_tensors=self.framework)
            image_features = self.image_processor(image, return_tensors=self.framework)
            yield {
                "is_last": i == len(candidate_labels) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _forward(self, model_inputs):
        target_size = model_inputs.pop("target_size")
        candidate_label = model_inputs.pop("candidate_label")
        is_last = model_inputs.pop("is_last")

        outputs = self.model(**model_inputs)

        model_outputs = {"target_size": target_size, "candidate_label": candidate_label, "is_last": is_last, **outputs}
        return model_outputs

    def postprocess(self, model_outputs, threshold=0.1, top_k=None):
        results = []
        for model_output in model_outputs:
            label = model_output["candidate_label"]
            model_output = BaseModelOutput(model_output)
            outputs = self.image_processor.post_process_object_detection(
                outputs=model_output, threshold=threshold, target_sizes=model_output["target_size"]
            )[0]

            for index in outputs["scores"].nonzero():
                score = outputs["scores"][index].item()
                box = self._get_bounding_box(outputs["boxes"][index][0])

                result = {"score": score, "label": label, "box": box}
                results.append(result)

        results = sorted(results, key=lambda x: x["score"], reverse=True)
        if top_k:
            results = results[:top_k]

        return results

    def _get_bounding_box(self, box: "torch.Tensor") -> Dict[str, int]:
        if self.framework != "pt":
            raise ValueError("The ZeroShotObjectDetectionPipeline is only available in PyTorch.")
        xmin, ymin, xmax, ymax = box.int().tolist()
        bbox = {
            "xmin": xmin,
            "ymin": ymin,
            "xmax": xmax,
            "ymax": ymax,
        }
        return bbox
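# Hedged usage sketch (not part of the original file): this class backs the
# "zero-shot-object-detection" task in `transformers.pipeline`. The model id below
# is one public checkpoint (OWL-ViT) known to support the task.
#
#     from transformers import pipeline
#
#     detector = pipeline("zero-shot-object-detection", model="google/owlvit-base-patch32")
#     preds = detector(
#         "http://images.cocodataset.org/val2017/000000039769.jpg",
#         candidate_labels=["cat", "remote control"],
#         threshold=0.1,
#     )
#     # -> [{"score": ..., "label": "cat", "box": {"xmin": ..., "ymin": ..., ...}}, ...]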
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
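# Illustrative behaviour of `find_backend` (examples added for clarity, not part of
# the original script): it collapses the backends named in a gating line into one
# sorted, "_and_"-joined key, and returns None for unrelated lines.
#
#     find_backend("try:")                                     # -> None
#     find_backend("    if not is_torch_available():")         # -> "torch"
#     find_backend("    if not is_torch_available() and not is_vision_available():")
#                                                              # -> "torch_and_vision"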
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase ):
_lowerCamelCase : Any = data
_lowerCamelCase : Node | None = None
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self ):
_lowerCamelCase : str = None
_lowerCamelCase : str = None
def __iter__( self ):
_lowerCamelCase : List[str] = self.head
while self.head:
yield node.data
_lowerCamelCase : Optional[int] = node.next
if node == self.head:
break
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join(str(lowercase ) for item in iter(self ) )
def A_ ( self , lowercase ):
self.insert_nth(len(self ) , lowercase )
def A_ ( self , lowercase ):
self.insert_nth(0 , lowercase )
def A_ ( self , lowercase , lowercase ):
if index < 0 or index > len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : List[Any] = Node(lowercase )
if self.head is None:
_lowerCamelCase : str = new_node # first node points itself
_lowerCamelCase : Union[str, Any] = new_node
elif index == 0: # insert at head
_lowerCamelCase : List[str] = self.head
_lowerCamelCase : str = new_node
else:
_lowerCamelCase : Union[str, Any] = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : Union[str, Any] = temp.next
_lowerCamelCase : List[str] = new_node
if index == len(self ) - 1: # insert at tail
_lowerCamelCase : Any = new_node
def A_ ( self ):
return self.delete_nth(0 )
def A_ ( self ):
return self.delete_nth(len(self ) - 1 )
def A_ ( self , lowercase = 0 ):
if not 0 <= index < len(self ):
raise IndexError('list index out of range.' )
_lowerCamelCase : Any = self.head
if self.head == self.tail: # just one node
_lowerCamelCase : List[str] = None
elif index == 0: # delete head node
_lowerCamelCase : List[str] = self.tail.next.next
_lowerCamelCase : Optional[int] = self.head.next
else:
_lowerCamelCase : Dict = self.head
for _ in range(index - 1 ):
_lowerCamelCase : List[Any] = temp.next
_lowerCamelCase : int = temp.next
_lowerCamelCase : Optional[int] = temp.next.next
if index == len(self ) - 1: # delete at tail
_lowerCamelCase : List[Any] = temp
return delete_node.data
def A_ ( self ):
return len(self ) == 0
def _snake_case ( ):
_lowerCamelCase : Union[str, Any] = CircularLinkedList()
assert len(lowercase__ ) == 0
assert circular_linked_list.is_empty() is True
assert str(lowercase__ ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(lowercase__ ) == i
circular_linked_list.insert_nth(lowercase__ , i + 1 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(lowercase__ ) == "->".join(str(lowercase__ ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse

import numpy as np
import torch

from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")


def load_weights(checkpoint, hf_model, config):
    # NOTE: the target attribute names (conv_pre / upsampler / resblocks / conv_post)
    # are reconstructed from the SpeechT5HifiGan module layout; the original left-hand
    # sides were lost in this copy of the script.
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()


@torch.no_grad()
def convert_hifigan_checkpoint(checkpoint_path, stats_path, pytorch_dump_folder_path, config_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to original checkpoint")
    parser.add_argument("--stats_path", required=True, default=None, type=str, help="Path to stats.npy file")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_hifigan_checkpoint(
        args.checkpoint_path,
        args.stats_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.push_to_hub,
    )
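# Example invocation (hypothetical file names, added for illustration only):
#
#   python convert_hifigan_checkpoint.py \
#       --checkpoint_path generator.ckpt \
#       --stats_path stats.npy \
#       --pytorch_dump_folder_path ./speecht5_hifigan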
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """
    Greedy activity selection: prints a maximum-size set of mutually compatible
    activities, assuming the activities are sorted by finish time.

    >>> start = [1, 3, 0, 5, 8, 5]
    >>> finish = [2, 4, 6, 7, 9, 9]
    >>> print_max_activities(start, finish)
    The following activities are selected:
    0,1,3,4,
    """
    n = len(finish)
    print("The following activities are selected:")

    # The first activity is always selected
    i = 0
    print(i, end=",")

    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_efficientnet": [
        "EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientNetConfig",
        "EfficientNetOnnxConfig",
    ]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_efficientnet"] = ["EfficientNetImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_efficientnet"] = [
        "EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientNetForImageClassification",
        "EfficientNetModel",
        "EfficientNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_efficientnet import (
EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
EfficientNetConfig,
EfficientNetOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientnet import EfficientNetImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientnet import (
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientNetForImageClassification,
EfficientNetModel,
EfficientNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available


if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"]
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"]
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train()
import argparse
from pathlib import Path

import requests
import torch
from PIL import Image

from transformers import (
    RobertaTokenizer,
    TrOCRConfig,
    TrOCRForCausalLM,
    TrOCRProcessor,
    VisionEncoderDecoderModel,
    ViTConfig,
    ViTImageProcessor,
    ViTModel,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
    for i in range(encoder_config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm1.weight", f"encoder.encoder.layer.{i}.layernorm_before.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm1.bias", f"encoder.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.weight", f"encoder.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.attn.proj.bias", f"encoder.encoder.layer.{i}.attention.output.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.norm2.weight", f"encoder.encoder.layer.{i}.layernorm_after.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.norm2.bias", f"encoder.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.weight", f"encoder.encoder.layer.{i}.intermediate.dense.weight")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc1.bias", f"encoder.encoder.layer.{i}.intermediate.dense.bias")
        )
        rename_keys.append(
            (f"encoder.deit.blocks.{i}.mlp.fc2.weight", f"encoder.encoder.layer.{i}.output.dense.weight")
        )
        rename_keys.append((f"encoder.deit.blocks.{i}.mlp.fc2.bias", f"encoder.encoder.layer.{i}.output.dense.bias"))

    # cls token, position embeddings and patch embeddings of encoder
    rename_keys.extend(
        [
            ("encoder.deit.cls_token", "encoder.embeddings.cls_token"),
            ("encoder.deit.pos_embed", "encoder.embeddings.position_embeddings"),
            ("encoder.deit.patch_embed.proj.weight", "encoder.embeddings.patch_embeddings.projection.weight"),
            ("encoder.deit.patch_embed.proj.bias", "encoder.embeddings.patch_embeddings.projection.bias"),
            ("encoder.deit.norm.weight", "encoder.layernorm.weight"),
            ("encoder.deit.norm.bias", "encoder.layernorm.bias"),
        ]
    )

    return rename_keys


def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(f"encoder.deit.blocks.{i}.attn.qkv.weight")

        state_dict[f"encoder.encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[f"encoder.encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im


@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """
    Copy/paste/tweak the original model's weights to our VisionEncoderDecoderModel structure.
    """
    # define encoder and decoder configs based on checkpoint_url
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()

    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")

    # the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False

    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]

    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)

    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]

    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val

    # load state dict
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)

    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values

    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits

    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )

    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving processor to {pytorch_dump_folder_path}")
    processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pth file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()
    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = ['''image_processor''']
__lowerCAmelCase = '''SamImageProcessor'''
def __init__( self , _UpperCAmelCase ):
super().__init__(_UpperCAmelCase )
__a : Any = self.image_processor
__a : List[Any] = -10
__a : str = self.image_processor.size['''longest_edge''']
def __call__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase = None , **_UpperCAmelCase , ):
__a : Tuple = self.image_processor(
_UpperCAmelCase , return_tensors=_UpperCAmelCase , **_UpperCAmelCase , )
# pop arguments that are not used in the foward but used nevertheless
__a : Optional[Any] = encoding_image_processor['''original_sizes''']
if hasattr(_UpperCAmelCase , '''numpy''' ): # Checks if Torch or TF tensor
__a : Optional[Any] = original_sizes.numpy()
__a , __a , __a : int = self._check_and_preprocess_points(
input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , )
__a : List[Any] = self._normalize_and_convert(
_UpperCAmelCase , _UpperCAmelCase , input_points=_UpperCAmelCase , input_labels=_UpperCAmelCase , input_boxes=_UpperCAmelCase , return_tensors=_UpperCAmelCase , )
return encoding_image_processor
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="pt" , ):
if input_points is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
__a : Dict = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] ) for point in input_points
]
else:
__a : Dict = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase )
for point, original_size in zip(_UpperCAmelCase , _UpperCAmelCase )
]
# check that all arrays have the same shape
if not all(point.shape == input_points[0].shape for point in input_points ):
if input_labels is not None:
__a , __a : Tuple = self._pad_points_and_labels(_UpperCAmelCase , _UpperCAmelCase )
__a : List[Any] = np.array(_UpperCAmelCase )
if input_labels is not None:
__a : List[Any] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if len(_UpperCAmelCase ) != len(_UpperCAmelCase ):
__a : Any = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , original_sizes[0] , is_bounding_box=_UpperCAmelCase )
for box in input_boxes
]
else:
__a : int = [
self._normalize_coordinates(self.target_size , _UpperCAmelCase , _UpperCAmelCase , is_bounding_box=_UpperCAmelCase )
for box, original_size in zip(_UpperCAmelCase , _UpperCAmelCase )
]
__a : Optional[int] = np.array(_UpperCAmelCase )
if input_boxes is not None:
if return_tensors == "pt":
__a : Any = torch.from_numpy(_UpperCAmelCase )
# boxes batch size of 1 by default
__a : str = input_boxes.unsqueeze(1 ) if len(input_boxes.shape ) != 3 else input_boxes
elif return_tensors == "tf":
__a : Dict = tf.convert_to_tensor(_UpperCAmelCase )
# boxes batch size of 1 by default
__a : str = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_boxes.shape ) != 3 else input_boxes
encoding_image_processor.update({'''input_boxes''': input_boxes} )
if input_points is not None:
if return_tensors == "pt":
__a : int = torch.from_numpy(_UpperCAmelCase )
# point batch size of 1 by default
__a : Optional[Any] = input_points.unsqueeze(1 ) if len(input_points.shape ) != 4 else input_points
elif return_tensors == "tf":
__a : List[Any] = tf.convert_to_tensor(_UpperCAmelCase )
# point batch size of 1 by default
__a : Optional[Any] = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_points.shape ) != 4 else input_points
encoding_image_processor.update({'''input_points''': input_points} )
if input_labels is not None:
if return_tensors == "pt":
__a : Any = torch.from_numpy(_UpperCAmelCase )
# point batch size of 1 by default
__a : Union[str, Any] = input_labels.unsqueeze(1 ) if len(input_labels.shape ) != 3 else input_labels
elif return_tensors == "tf":
__a : str = tf.convert_to_tensor(_UpperCAmelCase )
# point batch size of 1 by default
__a : Dict = tf.expand_dims(_UpperCAmelCase , 1 ) if len(input_labels.shape ) != 3 else input_labels
encoding_image_processor.update({'''input_labels''': input_labels} )
return encoding_image_processor
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = max([point.shape[0] for point in input_points] )
__a : Dict = []
for i, point in enumerate(_UpperCAmelCase ):
if point.shape[0] != expected_nb_points:
__a : Any = np.concatenate(
[point, np.zeros((expected_nb_points - point.shape[0], 2) ) + self.point_pad_value] , axis=0 )
__a : List[Any] = np.append(input_labels[i] , [self.point_pad_value] )
processed_input_points.append(_UpperCAmelCase )
__a : int = processed_input_points
return input_points, input_labels
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase=False ):
__a , __a : str = original_size
__a , __a : Optional[int] = self.image_processor._get_preprocess_shape(_UpperCAmelCase , longest_edge=_UpperCAmelCase )
__a : List[str] = deepcopy(_UpperCAmelCase ).astype(_UpperCAmelCase )
if is_bounding_box:
__a : Optional[int] = coords.reshape(-1 , 2 , 2 )
__a : str = coords[..., 0] * (new_w / old_w)
__a : List[Any] = coords[..., 1] * (new_h / old_h)
if is_bounding_box:
__a : List[Any] = coords.reshape(-1 , 4 )
return coords
def _lowerCamelCase ( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , ):
if input_points is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ): # Checks for TF or Torch tensor
__a : str = input_points.numpy().tolist()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_points[0] , _UpperCAmelCase ):
raise ValueError('''Input points must be a list of list of floating points.''' )
__a : str = [np.array(_UpperCAmelCase ) for input_point in input_points]
else:
__a : Optional[int] = None
if input_labels is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ):
__a : Dict = input_labels.numpy().tolist()
if not isinstance(_UpperCAmelCase , _UpperCAmelCase ) or not isinstance(input_labels[0] , _UpperCAmelCase ):
raise ValueError('''Input labels must be a list of list integers.''' )
__a : Dict = [np.array(_UpperCAmelCase ) for label in input_labels]
else:
__a : Tuple = None
if input_boxes is not None:
if hasattr(_UpperCAmelCase , '''numpy''' ):
__a : List[Any] = input_boxes.numpy().tolist()
if (
not isinstance(_UpperCAmelCase , _UpperCAmelCase )
or not isinstance(input_boxes[0] , _UpperCAmelCase )
or not isinstance(input_boxes[0][0] , _UpperCAmelCase )
):
raise ValueError('''Input boxes must be a list of list of list of floating points.''' )
__a : Optional[Any] = [np.array(_UpperCAmelCase ).astype(np.floataa ) for box in input_boxes]
else:
__a : Union[str, Any] = None
return input_points, input_labels, input_boxes
@property
def _lowerCamelCase ( self ):
__a : Tuple = self.image_processor.model_input_names
return list(dict.fromkeys(_UpperCAmelCase ) )
def _lowerCamelCase ( self , *_UpperCAmelCase , **_UpperCAmelCase ):
return self.image_processor.post_process_masks(*_UpperCAmelCase , **_UpperCAmelCase )
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
_A = logging.get_logger(__name__)
class _lowercase ( __UpperCAmelCase ):
def __init__( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> None:
warnings.warn(
'The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use SegformerImageProcessor instead.' , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ )
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
_A = logging.get_logger(__name__)
_A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
},
'merges_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
},
'tokenizer_file': {
'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json',
'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json',
'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json',
'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json',
'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json',
'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/bart-base": 1024,
    "facebook/bart-large": 1024,
    "facebook/bart-large-mnli": 1024,
    "facebook/bart-large-cnn": 1024,
    "facebook/bart-large-xsum": 1024,
    "yjernite/bart_eli5": 1024,
}
class _lowercase ( __UpperCAmelCase ):
lowercase_ = VOCAB_FILES_NAMES
lowercase_ = PRETRAINED_VOCAB_FILES_MAP
lowercase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase_ = ['input_ids', 'attention_mask']
lowercase_ = BartTokenizer
def __init__( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_="replace" , UpperCAmelCase_="<s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="</s>" , UpperCAmelCase_="<s>" , UpperCAmelCase_="<unk>" , UpperCAmelCase_="<pad>" , UpperCAmelCase_="<mask>" , UpperCAmelCase_=False , UpperCAmelCase_=True , **UpperCAmelCase_ , ) -> Union[str, Any]:
super().__init__(
UpperCAmelCase_ , UpperCAmelCase_ , tokenizer_file=UpperCAmelCase_ , errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , trim_offsets=UpperCAmelCase_ , **UpperCAmelCase_ , )
lowerCamelCase : List[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space:
lowerCamelCase : Tuple = getattr(UpperCAmelCase_ , pre_tok_state.pop('type' ) )
lowerCamelCase : Optional[Any] = add_prefix_space
lowerCamelCase : str = pre_tok_class(**UpperCAmelCase_ )
lowerCamelCase : Optional[Any] = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCamelCase : Dict = 'post_processor'
lowerCamelCase : str = getattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
if tokenizer_component_instance:
lowerCamelCase : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase : int = tuple(state['sep'] )
if "cls" in state:
lowerCamelCase : str = tuple(state['cls'] )
lowerCamelCase : Optional[Any] = False
if state.get('add_prefix_space' , UpperCAmelCase_ ) != add_prefix_space:
lowerCamelCase : Dict = add_prefix_space
lowerCamelCase : Tuple = True
if state.get('trim_offsets' , UpperCAmelCase_ ) != trim_offsets:
lowerCamelCase : Tuple = trim_offsets
lowerCamelCase : Dict = True
if changes_to_apply:
lowerCamelCase : Optional[int] = getattr(UpperCAmelCase_ , state.pop('type' ) )
lowerCamelCase : Any = component_class(**UpperCAmelCase_ )
setattr(self.backend_tokenizer , UpperCAmelCase_ , UpperCAmelCase_ )
@property
def _UpperCamelCase ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def _UpperCamelCase ( self , UpperCAmelCase_ ) -> List[Any]:
lowerCamelCase : Optional[int] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else value
lowerCamelCase : int = value
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> BatchEncoding:
lowerCamelCase : str = kwargs.get('is_split_into_words' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , *UpperCAmelCase_ , **UpperCAmelCase_ ) -> BatchEncoding:
lowerCamelCase : Optional[Any] = kwargs.get('is_split_into_words' , UpperCAmelCase_ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
'to use it with pretokenized inputs.' )
return super()._encode_plus(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ) -> Tuple[str]:
lowerCamelCase : Any = self._tokenizer.model.save(UpperCAmelCase_ , name=UpperCAmelCase_ )
return tuple(UpperCAmelCase_ )
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_=None ) -> List[Any]:
lowerCamelCase : Union[str, Any] = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ = None ) -> List[int]:
lowerCamelCase : List[Any] = [self.sep_token_id]
lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
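# Usage sketch for the fast tokenizer above. This assumes the class is the one
# transformers ships as `BartTokenizerFast`; the checkpoint id comes from the
# pretrained maps listed above.
from transformers import BartTokenizerFast

bart_tokenizer = BartTokenizerFast.from_pretrained("facebook/bart-base")

single = bart_tokenizer("Hello world")
print(bart_tokenizer.convert_ids_to_tokens(single["input_ids"]))  # <s> ... </s>

# A pair is joined as <s> A </s></s> B </s>, mirroring the special-token logic above.
pair = bart_tokenizer("Hello", "world")
print(bart_tokenizer.convert_ids_to_tokens(pair["input_ids"]))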
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length]
            for waveform in raw_speech
        ]
        if isinstance(audio_features[0], List):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
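# Minimal usage sketch, assuming the extractor above is the one transformers
# ships as `TvltFeatureExtractor` (same `model_input_names` and defaults).
import numpy as np
from transformers import TvltFeatureExtractor

audio_extractor = TvltFeatureExtractor()
waveform = np.zeros(44100, dtype=np.float32)  # one second of silence at 44.1 kHz
features = audio_extractor(waveform, sampling_rate=44100, return_tensors="np")
print(features["audio_values"].shape)  # (1, 1, padded_time, 128) log-mel patches
print(features["audio_mask"].shape)    # per-patch attention mask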
import random
import unittest

import torch

from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=1e-2)
"""simple docstring"""
import string
from math import logaa
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[str] = document.translate(
str.maketrans('' , '' , string.punctuation ) ).replace('\n' , '' )
__lowerCAmelCase : Dict = document_without_punctuation.split(' ' ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = corpus.lower().translate(
str.maketrans('' , '' , string.punctuation ) ) # strip all punctuation and replace it with ''
__lowerCAmelCase : List[Any] = corpus_without_punctuation.split('\n' )
__lowerCAmelCase : Optional[Any] = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_UpperCamelCase ))
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=False ):
if smoothing:
if n == 0:
raise ValueError('log10(0) is undefined.' )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError('df must be > 0' )
elif n == 0:
raise ValueError('log10(0) is undefined.' )
return round(logaa(n / df ) , 3 )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
return round(tf * idf , 3 )
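# Worked example for the helpers above: a corpus of three one-line documents.
if __name__ == "__main__":
    corpus = "the cat sat\nthe dog ran\nthe cat ran"
    tf = term_frequency("cat", "the cat sat")   # 1 occurrence
    df, n = document_frequency("cat", corpus)   # (2, 3)
    idf = inverse_document_frequency(df, n)     # round(log10(3 / 2), 3) = 0.176
    print(tf_idf(tf, idf))                      # 0.176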
"""simple docstring"""
__version__ = "0.21.0"
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich
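# Minimal sketch of the public API re-exported above; the tiny torch model is
# illustrative only.
import torch
from accelerate import Accelerator

accelerator = Accelerator()  # handles device placement and optional mixed precision
model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = accelerator.prepare(model, optimizer)

x = torch.randn(8, 4, device=accelerator.device)
loss = model(x).sum()
accelerator.backward(loss)  # use instead of loss.backward()
optimizer.step()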
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text


if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        print(f"Current {symbol:<4} stock price is {stock_price(symbol):>8}")
from __future__ import annotations

import numpy as np


def relu(vector: list[float]):
    """Element-wise ReLU activation: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
logger = logging.get_logger(__name__)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : int=False ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
UpperCAmelCase__ = os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(F'''Loading PyTorch weights from {pt_path}''' )
UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""" )
logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' )
UpperCAmelCase__ = convert_pytorch_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
UpperCAmelCase__ = convert_pytorch_sharded_state_dict_to_flax(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return flax_state_dict
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple[str] , SCREAMING_SNAKE_CASE__ : np.ndarray , SCREAMING_SNAKE_CASE__ : Dict[str, jnp.ndarray] , SCREAMING_SNAKE_CASE__ : str , ):
'''simple docstring'''
def is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ : Tuple[str] ) -> bool:
return len(set(SCREAMING_SNAKE_CASE__ ) & {key, (model_prefix,) + key} ) > 0
# layer norm
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# embedding
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
UpperCAmelCase__ = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
UpperCAmelCase__ = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
UpperCAmelCase__ = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
UpperCAmelCase__ = pt_tuple_key[-2] + """_v"""
if name is not None:
UpperCAmelCase__ = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any ):
'''simple docstring'''
UpperCAmelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
UpperCAmelCase__ = flax_model.params["""params"""]
else:
UpperCAmelCase__ = flax_model.params
UpperCAmelCase__ = flatten_dict(SCREAMING_SNAKE_CASE__ )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase__ = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = {}
UpperCAmelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase__ = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
UpperCAmelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase__ , UpperCAmelCase__ = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
UpperCAmelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
UpperCAmelCase__ = jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ = jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ = jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] ):
'''simple docstring'''
import torch
# Load the index
UpperCAmelCase__ = {}
for shard_file in shard_filenames:
# load using msgpack utils
UpperCAmelCase__ = torch.load(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = {k: v.numpy() for k, v in pt_state_dict.items()}
UpperCAmelCase__ = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
UpperCAmelCase__ = flax_model.params["""params"""]
UpperCAmelCase__ = flatten_dict(SCREAMING_SNAKE_CASE__ )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
UpperCAmelCase__ = flax_model.params
UpperCAmelCase__ = flatten_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
UpperCAmelCase__ = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
UpperCAmelCase__ = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
UpperCAmelCase__ = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase__ = pt_tuple_key[1:]
# Correctly rename weight parameters
UpperCAmelCase__ , UpperCAmelCase__ = rename_key_and_reshape_tensor(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# add model prefix if necessary
UpperCAmelCase__ = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase__ = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
UpperCAmelCase__ = jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
if "var" in flax_key[-1]:
UpperCAmelCase__ = jnp.asarray(SCREAMING_SNAKE_CASE__ )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ = jnp.asarray(SCREAMING_SNAKE_CASE__ )
else:
# also add unexpected weight so that warning is thrown
UpperCAmelCase__ = jnp.asarray(SCREAMING_SNAKE_CASE__ )
return unflatten_dict(SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Tuple ):
'''simple docstring'''
UpperCAmelCase__ = os.path.abspath(SCREAMING_SNAKE_CASE__ )
logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )
# import correct flax class
UpperCAmelCase__ = getattr(SCREAMING_SNAKE_CASE__ , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(SCREAMING_SNAKE_CASE__ , """rb""" ) as state_f:
try:
UpperCAmelCase__ = from_bytes(SCREAMING_SNAKE_CASE__ , state_f.read() )
except UnpicklingError:
raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )
return load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
UpperCAmelCase__ = flatten_dict(jax.tree_util.tree_map(lambda SCREAMING_SNAKE_CASE__ : x.dtype == jnp.bfloataa , SCREAMING_SNAKE_CASE__ ) ).values()
if any(SCREAMING_SNAKE_CASE__ ):
# convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
UpperCAmelCase__ = jax.tree_util.tree_map(
lambda SCREAMING_SNAKE_CASE__ : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = flatten_dict(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = pt_model.state_dict()
UpperCAmelCase__ = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
UpperCAmelCase__ = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
UpperCAmelCase__ = []
UpperCAmelCase__ = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
UpperCAmelCase__ = flax_key_tuple[0] == pt_model.base_model_prefix
UpperCAmelCase__ = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
UpperCAmelCase__ = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
UpperCAmelCase__ = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# conv layer
UpperCAmelCase__ = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ = jnp.transpose(SCREAMING_SNAKE_CASE__ , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE__ ) not in pt_model_dict:
# linear layer
UpperCAmelCase__ = flax_key_tuple[:-1] + ("""weight""",)
UpperCAmelCase__ = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
UpperCAmelCase__ = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
UpperCAmelCase__ = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
UpperCAmelCase__ = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
UpperCAmelCase__ = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
UpperCAmelCase__ = """.""".join(SCREAMING_SNAKE_CASE__ )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
UpperCAmelCase__ = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
UpperCAmelCase__ = key.split(""".""" )
UpperCAmelCase__ = None
if key_components[-3::2] == ["parametrizations", "original0"]:
UpperCAmelCase__ = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
UpperCAmelCase__ = key_components[-2] + """_v"""
if name is not None:
UpperCAmelCase__ = key_components[:-3] + [name]
UpperCAmelCase__ = """.""".join(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = key
if flax_key in special_pt_names:
UpperCAmelCase__ = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
else:
# add weight to pytorch dict
UpperCAmelCase__ = np.asarray(SCREAMING_SNAKE_CASE__ ) if not isinstance(SCREAMING_SNAKE_CASE__ , np.ndarray ) else flax_tensor
UpperCAmelCase__ = torch.from_numpy(SCREAMING_SNAKE_CASE__ )
# remove from missing keys
missing_keys.remove(SCREAMING_SNAKE_CASE__ )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(SCREAMING_SNAKE_CASE__ )
pt_model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# re-transform missing_keys to list
UpperCAmelCase__ = list(SCREAMING_SNAKE_CASE__ )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )
if len(SCREAMING_SNAKE_CASE__ ) > 0:
logger.warning(
F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
""" use it for predictions and inference.""" )
else:
logger.warning(
F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
"""If your task is similar to the task the model of the checkpoint was trained on, """
F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )
return pt_model
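# Sketch of how these converters are usually exercised. It assumes the
# obfuscated functions above are transformers' modeling_flax_pytorch_utils
# helpers; cross-loading is then driven via `from_pretrained(..., from_pt=True)`.
from transformers import BertConfig, BertModel, FlaxBertModel

tiny_config = BertConfig(num_hidden_layers=1, hidden_size=32, num_attention_heads=2, intermediate_size=64)
BertModel(tiny_config).save_pretrained("tiny-bert-pt")

# from_pt=True routes through the PyTorch -> Flax state-dict conversion above
flax_model = FlaxBertModel.from_pretrained("tiny-bert-pt", from_pt=True)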
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'google/pix2struct-textcaps-base': (
        'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'
    ),
}
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = """pix2struct_text_model"""
lowerCAmelCase_ : str = ["""past_key_values"""]
lowerCAmelCase_ : Dict = {
"""hidden_size""": """hidden_size""",
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : List[str] , _UpperCAmelCase : Dict=5_02_44 , _UpperCAmelCase : Tuple=7_68 , _UpperCAmelCase : List[Any]=64 , _UpperCAmelCase : Dict=20_48 , _UpperCAmelCase : List[str]=12 , _UpperCAmelCase : Tuple=12 , _UpperCAmelCase : Dict=32 , _UpperCAmelCase : Any=1_28 , _UpperCAmelCase : Optional[int]=0.1 , _UpperCAmelCase : str=1E-6 , _UpperCAmelCase : List[str]=1.0 , _UpperCAmelCase : str="gelu_new" , _UpperCAmelCase : str=0 , _UpperCAmelCase : Any=False , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=True , **_UpperCAmelCase : str , ):
"""simple docstring"""
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = d_kv
UpperCAmelCase__ = d_ff
UpperCAmelCase__ = num_layers
UpperCAmelCase__ = num_heads
UpperCAmelCase__ = relative_attention_num_buckets
UpperCAmelCase__ = relative_attention_max_distance
UpperCAmelCase__ = dropout_rate
UpperCAmelCase__ = layer_norm_epsilon
UpperCAmelCase__ = initializer_factor
UpperCAmelCase__ = use_cache
UpperCAmelCase__ = eos_token_id
UpperCAmelCase__ = decoder_start_token_id
# for backwards compatibility
UpperCAmelCase__ = dense_act_fn
super().__init__(
pad_token_id=_UpperCAmelCase , eos_token_id=_UpperCAmelCase , decoder_start_token_id=_UpperCAmelCase , tie_word_embeddings=_UpperCAmelCase , is_decoder=_UpperCAmelCase , **_UpperCAmelCase , )
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : int ):
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
UpperCAmelCase__ = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = """pix2struct_vision_model"""
def __init__( self : Any , _UpperCAmelCase : List[Any]=7_68 , _UpperCAmelCase : Optional[int]=7_68 , _UpperCAmelCase : Dict=20_48 , _UpperCAmelCase : Tuple=64 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : int=12 , _UpperCAmelCase : Dict="gelu_new" , _UpperCAmelCase : List[Any]=1E-6 , _UpperCAmelCase : int=0.0 , _UpperCAmelCase : str=0.0 , _UpperCAmelCase : Union[str, Any]=1E-10 , _UpperCAmelCase : Union[str, Any]=1.0 , _UpperCAmelCase : Optional[int]=40_96 , _UpperCAmelCase : Optional[int]=32 , _UpperCAmelCase : Dict=1_28 , **_UpperCAmelCase : int , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = patch_embed_hidden_size
UpperCAmelCase__ = d_ff
UpperCAmelCase__ = dropout_rate
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = initializer_factor
UpperCAmelCase__ = attention_dropout
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = dense_act_fn
UpperCAmelCase__ = seq_len
UpperCAmelCase__ = relative_attention_num_buckets
UpperCAmelCase__ = relative_attention_max_distance
UpperCAmelCase__ = d_kv
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Optional[Any] , _UpperCAmelCase : Union[str, os.PathLike] , **_UpperCAmelCase : Optional[int] ):
"""simple docstring"""
cls._set_token_in_kwargs(_UpperCAmelCase )
UpperCAmelCase__ , UpperCAmelCase__ = cls.get_config_dict(_UpperCAmelCase , **_UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("""model_type""" ) == "pix2struct":
UpperCAmelCase__ = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(_UpperCAmelCase , **_UpperCAmelCase )
class lowerCAmelCase_ ( lowerCamelCase_ ):
'''simple docstring'''
lowerCAmelCase_ : str = """pix2struct"""
lowerCAmelCase_ : Union[str, Any] = True
def __init__( self : int , _UpperCAmelCase : Tuple=None , _UpperCAmelCase : Dict=None , _UpperCAmelCase : Any=1.0 , _UpperCAmelCase : Tuple=0.02 , _UpperCAmelCase : Optional[Any]=False , _UpperCAmelCase : Union[str, Any]=False , _UpperCAmelCase : List[str]=True , **_UpperCAmelCase : Optional[int] , ):
"""simple docstring"""
super().__init__(tie_word_embeddings=_UpperCAmelCase , is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
if text_config is None:
UpperCAmelCase__ = {}
logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" )
if vision_config is None:
UpperCAmelCase__ = {}
logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" )
UpperCAmelCase__ = PixaStructTextConfig(**_UpperCAmelCase )
UpperCAmelCase__ = PixaStructVisionConfig(**_UpperCAmelCase )
UpperCAmelCase__ = self.text_config.decoder_start_token_id
UpperCAmelCase__ = self.text_config.pad_token_id
UpperCAmelCase__ = self.text_config.eos_token_id
UpperCAmelCase__ = initializer_factor
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = self.initializer_range
UpperCAmelCase__ = self.initializer_range
UpperCAmelCase__ = is_vqa
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , _UpperCAmelCase : PixaStructTextConfig , _UpperCAmelCase : PixaStructVisionConfig , **_UpperCAmelCase : Any ):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
UpperCAmelCase__ = copy.deepcopy(self.__dict__ )
UpperCAmelCase__ = self.text_config.to_dict()
UpperCAmelCase__ = self.vision_config.to_dict()
UpperCAmelCase__ = self.__class__.model_type
return output
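# Usage sketch, assuming the three classes above are transformers'
# Pix2StructTextConfig, Pix2StructVisionConfig and Pix2StructConfig.
from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_cfg = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64, d_ff=128, d_kv=32)
vision_cfg = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
combined = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
print(combined.to_dict()["model_type"])  # "pix2struct"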
def depth_first_search(grid: list[list[int]], row: int, col: int, visit: set) -> int:
    """Count the simple paths from (row, col) to the bottom-right cell of `grid`,
    moving up/down/left/right and treating cells marked 1 as blocked."""
    row_length, col_length = len(grid), len(grid[0])
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))

    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)

    visit.remove((row, col))
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
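# Example run for the path counter above: a 3x3 grid with the centre blocked.
if __name__ == "__main__":
    example_grid = [
        [0, 0, 0],
        [0, 1, 0],
        [0, 0, 0],
    ]
    print(depth_first_search(example_grid, 0, 0, set()))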
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class __a (lowerCamelCase ):
def __init__( self : str , __magic_name__ : CLIPSegForImageSegmentation , __magic_name__ : CLIPSegProcessor , __magic_name__ : AutoencoderKL , __magic_name__ : CLIPTextModel , __magic_name__ : CLIPTokenizer , __magic_name__ : UNetaDConditionModel , __magic_name__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __magic_name__ : StableDiffusionSafetyChecker , __magic_name__ : CLIPImageProcessor , ) -> str:
"""simple docstring"""
super().__init__()
if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
UpperCAmelCase_ : Dict = (
F"""The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"""
F""" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure """
'''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
''' file'''
)
deprecate('''steps_offset!=1''' , '''1.0.0''' , __magic_name__ , standard_warn=__magic_name__ )
UpperCAmelCase_ : Optional[int] = dict(scheduler.config )
UpperCAmelCase_ : str = 1
UpperCAmelCase_ : List[str] = FrozenDict(__magic_name__ )
if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
UpperCAmelCase_ : Dict = (
F"""The configuration file of this scheduler: {scheduler} has not set the configuration"""
''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
''' Hub, it would be very nice if you could open a Pull request for the'''
''' `scheduler/scheduler_config.json` file'''
)
deprecate('''skip_prk_steps not set''' , '''1.0.0''' , __magic_name__ , standard_warn=__magic_name__ )
UpperCAmelCase_ : Dict = dict(scheduler.config )
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Tuple = FrozenDict(__magic_name__ )
if safety_checker is None:
logger.warning(
F"""You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"""
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
segmentation_model=__magic_name__ , segmentation_processor=__magic_name__ , vae=__magic_name__ , text_encoder=__magic_name__ , tokenizer=__magic_name__ , unet=__magic_name__ , scheduler=__magic_name__ , safety_checker=__magic_name__ , feature_extractor=__magic_name__ , )
def UpperCAmelCase__ ( self : List[str] , __magic_name__ : Optional[Union[str, int]] = "auto" ) -> List[str]:
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ : Union[str, Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__magic_name__ )
def UpperCAmelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
self.enable_attention_slicing(__magic_name__ )
def UpperCAmelCase__ ( self : Dict ) -> Optional[int]:
"""simple docstring"""
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
UpperCAmelCase_ : Tuple = torch.device('''cuda''' )
for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
if cpu_offloaded_model is not None:
cpu_offload(__magic_name__ , __magic_name__ )
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
hasattr(__magic_name__ , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
def __call__( self : Union[str, Any] , __magic_name__ : Union[str, List[str]] , __magic_name__ : Union[torch.FloatTensor, PIL.Image.Image] , __magic_name__ : str , __magic_name__ : int = 5_12 , __magic_name__ : int = 5_12 , __magic_name__ : int = 50 , __magic_name__ : float = 7.5 , __magic_name__ : Optional[Union[str, List[str]]] = None , __magic_name__ : Optional[int] = 1 , __magic_name__ : float = 0.0 , __magic_name__ : Optional[torch.Generator] = None , __magic_name__ : Optional[torch.FloatTensor] = None , __magic_name__ : Optional[str] = "pil" , __magic_name__ : bool = True , __magic_name__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __magic_name__ : int = 1 , **__magic_name__ : Tuple , ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.segmentation_processor(
text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
UpperCAmelCase_ : int = self.segmentation_model(**__magic_name__ )
UpperCAmelCase_ : Dict = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
UpperCAmelCase_ : List[Any] = self.numpy_to_pil(__magic_name__ )[0].resize(image.size )
# Run inpainting pipeline with the generated mask
UpperCAmelCase_ : int = StableDiffusionInpaintPipeline(
vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
return inpainting_pipeline(
prompt=__magic_name__ , image=__magic_name__ , mask_image=__magic_name__ , height=__magic_name__ , width=__magic_name__ , num_inference_steps=__magic_name__ , guidance_scale=__magic_name__ , negative_prompt=__magic_name__ , num_images_per_prompt=__magic_name__ , eta=__magic_name__ , generator=__magic_name__ , latents=__magic_name__ , output_type=__magic_name__ , return_dict=__magic_name__ , callback=__magic_name__ , callback_steps=__magic_name__ , )
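# Sketch of driving a text-guided inpainting pipeline like the one above via
# diffusers' community-pipeline loader. The checkpoint ids and the
# "text_inpainting" community pipeline name are assumptions here.
from PIL import Image
from transformers import CLIPSegForImageSegmentation, CLIPSegProcessor
from diffusers import DiffusionPipeline

seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")

text_inpainting = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting",
    custom_pipeline="text_inpainting",
    segmentation_model=seg_model,
    segmentation_processor=seg_processor,
).to("cuda")

init_image = Image.open("photo.png").resize((512, 512))
# `text` selects the region to mask out, `prompt` describes what to paint in
result = text_inpainting(prompt="a cup of coffee", image=init_image, text="a glass").images[0]
result.save("inpainted.png")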
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    """Simplest and fastest version of image resizing:
    https://en.wikipedia.org/wiki/Nearest-neighbor_interpolation"""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Get the source X coordinate for destination X."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get the source Y coordinate for destination Y."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 1
lowerCAmelCase__ = 3
lowerCAmelCase__ = (32, 32)
lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_UpperCamelCase )
return image
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_UpperCamelCase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
return model
@property
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCAmelCase__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='gelu' , projection_dim=5_12 , )
return CLIPTextModel(_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.dummy_cond_unet_upscale
lowerCAmelCase__ = DDPMScheduler()
lowerCAmelCase__ = DDIMScheduler(prediction_type='v_prediction' )
lowerCAmelCase__ = self.dummy_vae
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = StableDiffusionUpscalePipeline(
unet=_UpperCamelCase , low_res_scheduler=_UpperCamelCase , scheduler=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , max_noise_level=3_50 , )
lowerCAmelCase__ = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase__ = 'A painting of a squirrel eating a burger'
lowerCAmelCase__ = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
lowerCAmelCase__ = sd_pipe(
[prompt] , image=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ = output.images
lowerCAmelCase__ = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
lowerCAmelCase__ = sd_pipe(
[prompt] , image=_UpperCamelCase , generator=_UpperCamelCase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , return_dict=_UpperCamelCase , )[0]
lowerCAmelCase__ = image[0, -3:, -3:, -1]
lowerCAmelCase__ = image_from_tuple[0, -3:, -3:, -1]
lowerCAmelCase__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
lowerCAmelCase__ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ = self.dummy_cond_unet_upscale
lowerCAmelCase__ = DDPMScheduler()
lowerCAmelCase__ = DDIMScheduler(prediction_type='v_prediction' )
lowerCAmelCase__ = self.dummy_vae
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = StableDiffusionUpscalePipeline(
unet=_UpperCamelCase , low_res_scheduler=_UpperCamelCase , scheduler=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , max_noise_level=3_50 , )
lowerCAmelCase__ = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase__ = 'A painting of a squirrel eating a burger'
lowerCAmelCase__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ = output.images
assert image.shape[0] == 2
lowerCAmelCase__ = torch.Generator(device=_UpperCamelCase ).manual_seed(0 )
lowerCAmelCase__ = sd_pipe(
[prompt] , image=_UpperCamelCase , generator=_UpperCamelCase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type='np' , )
lowerCAmelCase__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' , 'This test requires a GPU' )
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.dummy_cond_unet_upscale
lowerCAmelCase__ = DDPMScheduler()
lowerCAmelCase__ = DDIMScheduler(prediction_type='v_prediction' )
lowerCAmelCase__ = self.dummy_vae
lowerCAmelCase__ = self.dummy_text_encoder
lowerCAmelCase__ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
lowerCAmelCase__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
lowerCAmelCase__ = Image.fromarray(np.uinta(_UpperCamelCase ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
lowerCAmelCase__ = unet.half()
lowerCAmelCase__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
lowerCAmelCase__ = StableDiffusionUpscalePipeline(
unet=_UpperCamelCase , low_res_scheduler=_UpperCamelCase , scheduler=_UpperCamelCase , vae=_UpperCamelCase , text_encoder=_UpperCamelCase , tokenizer=_UpperCamelCase , max_noise_level=3_50 , )
lowerCAmelCase__ = sd_pipe.to(_UpperCamelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCamelCase )
lowerCAmelCase__ = 'A painting of a squirrel eating a burger'
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = sd_pipe(
[prompt] , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=2 , output_type='np' , ).images
lowerCAmelCase__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase):
def UpperCamelCase__ ( self ):
"""simple docstring"""
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
lowerCAmelCase__ = 'stabilityai/stable-diffusion-x4-upscaler'
lowerCAmelCase__ = StableDiffusionUpscalePipeline.from_pretrained(_UpperCamelCase )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ = 'a cat sitting on a park bench'
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , output_type='np' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def UpperCamelCase__ ( self ):
"""simple docstring"""
lowerCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCAmelCase__ = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
lowerCAmelCase__ = 'stabilityai/stable-diffusion-x4-upscaler'
lowerCAmelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
_UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing()
lowerCAmelCase__ = 'a cat sitting on a park bench'
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , output_type='np' , )
lowerCAmelCase__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def UpperCamelCase__ ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCAmelCase__ = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
lowerCAmelCase__ = 'stabilityai/stable-diffusion-x4-upscaler'
lowerCAmelCase__ = StableDiffusionUpscalePipeline.from_pretrained(
_UpperCamelCase , torch_dtype=torch.floataa , )
pipe.to(_UpperCamelCase )
pipe.set_progress_bar_config(disable=_UpperCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowerCAmelCase__ = 'a cat sitting on a park bench'
lowerCAmelCase__ = torch.manual_seed(0 )
lowerCAmelCase__ = pipe(
prompt=_UpperCamelCase , image=_UpperCamelCase , generator=_UpperCamelCase , num_inference_steps=5 , output_type='np' , )
lowerCAmelCase__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
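# The slow tests above reduce to this usage pattern; the checkpoint id and
# image URL are taken directly from the tests.
import torch
from diffusers import StableDiffusionUpscalePipeline
from diffusers.utils import load_image

upscaler = StableDiffusionUpscalePipeline.from_pretrained(
    "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
).to("cuda")
upscaler.enable_attention_slicing()

low_res = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/sd2-upscale/low_res_cat.png"
)
upscaled = upscaler(prompt="a cat sitting on a park bench", image=low_res).images[0]
upscaled.save("upsampled_cat.png")  # 4x the input resolution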
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def extract_time_from_single_job(job):
    """Extract time info from a single job in a GitHub Actions workflow run"""
    job_info = {}

    start = job["started_at"]
    end = job["completed_at"]

    start_datetime = date_parser.parse(start)
    end_datetime = date_parser.parse(end)

    duration_in_min = round((end_datetime - start_datetime).total_seconds() / 60.0)
    job_info["started_at"] = start
    job_info["completed_at"] = end
    job_info["duration"] = duration_in_min

    return job_info


def get_job_time(workflow_run_id, token=None):
    """Extract time info for all jobs in a GitHub Actions workflow run"""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_time = {}

    try:
        job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_time.update({job["name"]: extract_time_from_single_job(job) for job in result["jobs"]})

        return job_time
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    args = parser.parse_args()

    job_time = get_job_time(args.workflow_run_id)
    job_time = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))

    for k, v in job_time.items():
        print(f'{k}: {v["duration"]}')
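# Usage sketch (illustrative): the workflow run id is the number in the Actions run URL,
# e.g. https://github.com/huggingface/transformers/actions/runs/<run_id>. A token is only
# needed for private repositories; the id below is a made-up placeholder.
#
#   python get_job_time.py --workflow_run_id 1234567890
#
# or, from Python:
#
#   job_time = get_job_time(1234567890, token=os.environ.get("GITHUB_TOKEN"))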
| 348
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """
    Creates and saves a basic cluster config to be used on a local machine with potentially multiple GPUs.
    """
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )
    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"
    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
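# Usage sketch (illustrative): write a bf16 single-machine config somewhere other than the
# default cache location. `write_basic_config` is also re-exported from `accelerate.utils`,
# but calling the function defined above works the same way.
#
#   write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_config.json")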
| 205
| 0
|
from __future__ import annotations
import unittest
from transformers import XGLMConfig, XGLMTokenizer, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.xglm.modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
)
@require_tf
class TFXGLMModelTester:
    config_cls = XGLMConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=14,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        d_model=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        ffn_dim=37,
        activation_function="gelu",
        activation_dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = d_model
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.ffn_dim = ffn_dim
        self.activation_function = activation_function
        self.activation_dropout = activation_dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = 0
        self.eos_token_id = 2
        self.pad_token_id = 1
    def get_large_model_config(self):
        return XGLMConfig.from_pretrained("facebook/xglm-564M")

    def prepare_config_and_inputs(self):
        input_ids = tf.clip_by_value(
            ids_tensor([self.batch_size, self.seq_length], self.vocab_size), clip_value_min=0, clip_value_max=3
        )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        config = self.get_config()
        head_mask = floats_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

        return (
            config,
            input_ids,
            input_mask,
            head_mask,
        )
    def get_config(self):
        return XGLMConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            num_layers=self.num_hidden_layers,
            attention_heads=self.num_attention_heads,
            ffn_dim=self.ffn_dim,
            activation_function=self.activation_function,
            activation_dropout=self.activation_dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            use_cache=True,
            bos_token_id=self.bos_token_id,
            eos_token_id=self.eos_token_id,
            pad_token_id=self.pad_token_id,
            return_dict=True,
        )
    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, head_mask = self.prepare_config_and_inputs()

        inputs_dict = {
            "input_ids": input_ids,
            "head_mask": head_mask,
        }

        return config, inputs_dict
@require_tf
class TFXGLMModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFXGLMModel, TFXGLMForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFXGLMForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFXGLMModel, "text-generation": TFXGLMForCausalLM} if is_tf_available() else {}
    )
    test_onnx = False
    test_missing_keys = False
    test_pruning = False
    def setUp(self):
        self.model_tester = TFXGLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XGLMConfig, n_embd=37)
    def test_config(self):
        self.config_tester.run_common_tests()
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFXGLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="Currently, model embeddings are going to undergo a major refactor.")
    def test_resize_token_embeddings(self):
        super().test_resize_token_embeddings()
@require_tf
class TFXGLMModelLanguageGenerationTest(unittest.TestCase):
@slow
    def test_lm_generate_xglm(self, verify_outputs=True):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        input_ids = tf.convert_to_tensor([[2, 268, 9865]], dtype=tf.int32)  # The dog
        # </s> The dog is a very friendly dog. He is very affectionate and loves to play with other
        # fmt: off
        expected_output_ids = [2, 268, 9865, 67, 11, 1988, 57252, 9865, 5, 984, 67, 1988, 213838, 1658, 53, 70446, 33, 6657, 278, 1581]
        # fmt: on
        output_ids = model.generate(input_ids, do_sample=False, num_beams=1)
        if verify_outputs:
            self.assertListEqual(output_ids[0].numpy().tolist(), expected_output_ids)
@slow
    def test_xglm_sample(self):
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")

        tf.random.set_seed(0)
        tokenized = tokenizer("Today is a nice day and", return_tensors="tf")
        input_ids = tokenized.input_ids
        # forces the generation to happen on CPU, to avoid GPU-related quirks (and assure same output regardless of the available devices)
        with tf.device(":/CPU:0"):
            output_ids = model.generate(input_ids, do_sample=True, seed=[7, 0])
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "Today is a nice day and warm evening here over Southern Alberta!! Today when they closed schools due"
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
@slow
    def test_batch_generation(self):
        model = TFXGLMForCausalLM.from_pretrained("facebook/xglm-564M")
        tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When",
            "Hello, my dog is a little",
        ]
        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"], max_new_tokens=12)

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded, max_new_tokens=12)

        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_new_tokens=12)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "This is an extremelly long sentence that only exists to test the ability of the model to cope with "
            "left-padding, such as in batched generation. The output for the sequence below should be the same "
            "regardless of whether left padding is applied or not. When left padding is applied, the sequence will be "
            "a single",
            "Hello, my dog is a little bit of a shy one, but he is very friendly",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
| 367
|
import os
def solution():
    """Find the greatest product of four adjacent numbers in the same direction
    (right, down, or diagonal) in the 20x20 grid stored in grid.txt."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
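# Worked mini-example of the windowed product above (illustrative, not taken from grid.txt):
# for the row [1, 2, 3, 4, 5], the two horizontal windows of four adjacent numbers give
# 1*2*3*4 = 24 and 2*3*4*5 = 120, so the running maximum for that row would be 120.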
| 63
| 0
|
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}
def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the beginning of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)

    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")

    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")

    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 182
|
from math import ceil
def solution(n: int = 1_001) -> int:
    """Sum of the numbers on the diagonals of an n by n spiral grid."""
    total = 1

    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even

    return total


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number")
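# Worked check (illustrative) for a 5x5 spiral: the diagonal numbers are 1, 3, 5, 7, 9,
# 13, 17, 21, 25. Ring i contributes 4*(2*i + 1)**2 - 6*(2*i), i.e. the four corners of an
# odd square minus the evenly spaced offsets, so solution(5) = 1 + 24 + 76 = 101.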
| 182
| 1
|
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    """Evaluate a postfix (reverse Polish) expression and return the result."""
    if not postfix_notation:
        return 0

    operations = {"+", "-", "*", "/"}
    stack = []

    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division that truncates toward zero
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))

    return stack.pop()
if __name__ == "__main__":
import doctest
doctest.testmod()
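    # Example (illustrative): "2 1 + 3 *" is (2 + 1) * 3.
    print(evaluate_postfix(["2", "1", "+", "3", "*"]))  # -> 9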
| 363
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class CLIPSegProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor_text(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 339
| 0
|
"""simple docstring"""
import argparse
from collections import defaultdict
def overwrite_file(file, class_name, test_name, correct_line, done_test):
    _id = f"{file}_{class_name}_{test_name}"
    done_test[_id] += 1

    with open(file, "r") as f:
        lines = f.readlines()

    class_regex = f"class {class_name}("
    test_regex = f"{4 * ' '}def {test_name}("
    line_begin_regex = f"{8 * ' '}{correct_line.split()[0]}"
    another_line_begin_regex = f"{16 * ' '}{correct_line.split()[0]}"
    in_class = False
    in_func = False
    in_line = False
    insert_line = False
    count = 0
    spaces = 0
    new_lines = []
    for line in lines:
        if line.startswith(class_regex):
            in_class = True
        elif in_class and line.startswith(test_regex):
            in_func = True
        elif in_class and in_func and (line.startswith(line_begin_regex) or line.startswith(another_line_begin_regex)):
            spaces = len(line.split(correct_line.split()[0])[0])
            count += 1

            if count == done_test[_id]:
                in_line = True

        if in_class and in_func and in_line:
            if ")" not in line:
                continue
            else:
                insert_line = True

        if in_class and in_func and in_line and insert_line:
            new_lines.append(f"{spaces * ' '}{correct_line}")
            in_class = in_func = in_line = insert_line = False
        else:
            new_lines.append(line)

    with open(file, "w") as f:
        for line in new_lines:
            f.write(line)


def main(correct, fail=None):
    if fail is not None:
        with open(fail, "r") as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct, "r") as f:
        correct_lines = f.readlines()

    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(";")
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
_a = parser.parse_args()
main(args.correct_filename, args.fail_filename)
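# Input format sketch (illustrative): each line of --correct_filename is expected to hold
# `file;class_name;test_name;correct_line`, matching the `line.split(";")` above, e.g.
#
#   tests/models/bert/test_modeling_bert.py;BertModelIntegrationTest;test_inference;expected_slice = torch.tensor([...])
#
# while --fail_filename holds one `file::class::test` per line to restrict which tests are rewritten.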
| 61
|
"""simple docstring"""
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def get_dataset():
    data_dict = {
        "repo_name": ["test_repo1", "test_repo2", "test_repo3"],
        "path": ["test_1.py", "test_2.py", "unit_test.py"],
        "content": ["a " * 20, "a " * 30, "b " * 7],
    }
    dataset = Dataset.from_dict(data_dict)
    return dataset


class MakeDuplicateClustersTest(TestCase):
    def test_make_duplicate_clusters(self):
        ds = get_dataset()
        duplicate_clusters = make_duplicate_clusters(ds, 0.85)
        self.assertEqual(len(duplicate_clusters[0]), 2)

    def test_deduplicate_dataset(self):
        ds = get_dataset()
        ds_filter, duplicate_clusters = deduplicate_dataset(ds)
        self.assertEqual(len(ds_filter), 2)
        print(duplicate_clusters)
        self.assertEqual(duplicate_clusters[0][0]["copies"], 2)
        self.assertEqual(duplicate_clusters[0][0]["is_extreme"], True)
| 61
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        tubelet_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        mask_ratio=0.9,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.tubelet_size = tubelet_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames // tubelet_size) * self.num_patches_per_frame

        # use this variable to define bool_masked_pos
        self.num_masks = int(mask_ratio * self.seq_length)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return VideoMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            tubelet_size=self.tubelet_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = VideoMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = VideoMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        # important: each video needs to have the same number of masked patches
        # hence we define a single mask, which we then repeat for each example in the batch
        mask = torch.ones((self.num_masks,))
        mask = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
        bool_masked_pos = mask.expand(self.batch_size, -1).bool()
        result = model(pixel_values, bool_masked_pos)
        # model only returns predictions for masked patches
        num_masked_patches = mask.sum().item()
        decoder_num_labels = 3 * self.tubelet_size * self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_masked_patches, decoder_num_labels))

    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class VideoMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": VideoMAEModel, "video-classification": VideoMAEForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = VideoMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=VideoMAEConfig, has_text_modality=False, hidden_size=37)
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if model_class == VideoMAEForPreTraining:
            # important: each video needs to have the same number of masked patches
            # hence we define a single mask, which we then repeat for each example in the batch
            mask = torch.ones((self.model_tester.num_masks,))
            mask = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
            bool_masked_pos = mask.expand(self.model_tester.batch_size, -1).bool()
            inputs_dict["bool_masked_pos"] = bool_masked_pos.to(torch_device)

        if return_labels:
            if model_class in [
                *get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="VideoMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = VideoMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
                seq_len = (
                    num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
                )

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len, seq_len],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            num_visible_patches = self.model_tester.seq_length - self.model_tester.num_masks
            seq_length = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_outputs_equivalence(self):
        pass
def prepare_video():
    video = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video)
    return list(video)
@require_torch
@require_vision
class VideoMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.3669, -0.0688, -0.2421]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_for_pretraining(self):
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(torch_device)

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video, return_tensors="pt").to(torch_device)

        # add boolean mask, indicating which patches to mask
        local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt")
        inputs["bool_masked_pos"] = torch.load(local_path)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size([1, 1408, 1536])
        expected_slice = torch.tensor(
            [[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]], device=torch_device
        )
        self.assertEqual(outputs.logits.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `True`)
        expected_loss = torch.tensor([0.5142], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))

        # verify the loss (`config.norm_pix_loss` = `False`)
        model = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short", norm_pix_loss=False).to(
            torch_device
        )

        with torch.no_grad():
            outputs = model(**inputs)

        expected_loss = torch.tensor([0.6469], device=torch_device)
        self.assertTrue(torch.allclose(outputs.loss, expected_loss, atol=1e-4))
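# Illustrative sketch (not a test): building a `bool_masked_pos` for VideoMAEForPreTraining
# by hand, mirroring the pattern used above. The 0.9 mask ratio is the tester's default;
# `batch_size` and the model geometry are assumptions you supply yourself.
#
#   seq_length = (num_frames // tubelet_size) * (image_size // patch_size) ** 2
#   num_masks = int(0.9 * seq_length)
#   mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
#   bool_masked_pos = mask.expand(batch_size, -1).bool().to(torch_device)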
| 13
|
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
main()
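# Round-trip property (illustrative), usable as a quick sanity check:
#
#   msg = "attack at dawn"
#   assert decrypt_message("HELLO", encrypt_message("HELLO", msg)) == msg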
| 13
| 1
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipImageProcessor,
InstructBlipConfig,
InstructBlipForConditionalGeneration,
InstructBlipProcessor,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
LlamaConfig,
LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.embeddings.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.embeddings.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase=None , UpperCamelCase=False ):
"""simple docstring"""
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained("""bert-base-uncased""" , truncation_side="""left""" )
qformer_tokenizer.add_special_tokens({"""bos_token""": """[DEC]"""} )
if "t5" in model_name:
lowerCAmelCase__ : Optional[int] = TaTokenizerFast.from_pretrained("""google/flan-t5-xl""" , truncation_side="""left""" )
elif "vicuna" in model_name:
# the following was used in the original implementation:
# tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
# tokenizer.add_special_tokens({"pad_token": "[PAD]"})
# tokenizer.add_special_tokens({"bos_token": "</s>"})
# tokenizer.add_special_tokens({"eos_token": "</s>"})
# tokenizer.add_special_tokens({"unk_token": "</s>"})
lowerCAmelCase__ : Optional[int] = LlamaTokenizerFast.from_pretrained(
"""huggyllama/llama-7b""" , truncation_side="""left""" , bos_token="""</s>""" , unk_token="""</s>""" )
tokenizer.add_special_tokens({"""pad_token""": """[PAD]"""} )
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = get_blipa_config(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = InstructBlipForConditionalGeneration(UpperCamelCase ).eval()
lowerCAmelCase__ : Optional[Any] = {
"""instructblip-vicuna-7b""": ("""blip2_vicuna_instruct""", """vicuna7b"""),
"""instructblip-vicuna-13b""": ("""blip2_vicuna_instruct""", """vicuna13b"""),
"""instructblip-flan-t5-xl""": ("""blip2_t5_instruct""", """flant5xl"""),
"""instructblip-flan-t5-xxl""": ("""blip2_t5_instruct""", """flant5xxl"""),
}
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
lowerCAmelCase__ : str = """cuda:1""" if torch.cuda.is_available() else """cpu"""
lowerCAmelCase__ : Tuple = """cuda:2""" if torch.cuda.is_available() else """cpu"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = load_model_and_preprocess(
name=UpperCamelCase , model_type=UpperCamelCase , is_eval=UpperCamelCase , device=UpperCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
lowerCAmelCase__ : Union[str, Any] = original_model.state_dict()
lowerCAmelCase__ : List[Any] = create_rename_keys(UpperCamelCase )
for src, dest in rename_keys:
rename_key(UpperCamelCase , UpperCamelCase , UpperCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
lowerCAmelCase__ : List[str] = state_dict.pop(UpperCamelCase )
if key.startswith("""Qformer.bert""" ):
lowerCAmelCase__ : Optional[int] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
lowerCAmelCase__ : Tuple = key.replace("""self""" , """attention""" )
if "llm_proj" in key:
lowerCAmelCase__ : int = key.replace("""llm_proj""" , """language_projection""" )
if "t5_proj" in key:
lowerCAmelCase__ : Optional[int] = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""llm_model""" ):
lowerCAmelCase__ : Optional[int] = key.replace("""llm_model""" , """language_model""" )
if key.startswith("""t5""" ):
lowerCAmelCase__ : Tuple = key.replace("""t5""" , """language""" )
lowerCAmelCase__ : int = val
# read in qv biases
read_in_q_v_bias(UpperCamelCase , UpperCamelCase )
# note: weights get loaded in torch.float32 by default
hf_model.load_state_dict(UpperCamelCase , strict=UpperCamelCase )
lowerCAmelCase__ : List[str] = load_demo_image()
lowerCAmelCase__ : Optional[Any] = """What is unusual about this image?"""
# create processor
lowerCAmelCase__ : Dict = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=UpperCamelCase , image_std=UpperCamelCase )
lowerCAmelCase__ : List[str] = InstructBlipProcessor(
image_processor=UpperCamelCase , tokenizer=UpperCamelCase , qformer_tokenizer=UpperCamelCase , )
lowerCAmelCase__ : str = processor(images=UpperCamelCase , text=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase )
# make sure processor creates exact same pixel values
lowerCAmelCase__ : Tuple = vis_processors["""eval"""](UpperCamelCase ).unsqueeze(0 ).to(UpperCamelCase )
lowerCAmelCase__ : Optional[int] = inputs.pixel_values
assert torch.allclose(original_pixel_values.to(pixel_values.device ) , UpperCamelCase )
original_model.to(UpperCamelCase )
hf_model.to(UpperCamelCase )
with torch.no_grad():
if "vicuna" in model_name:
lowerCAmelCase__ : Union[str, Any] = original_model({"""image""": original_pixel_values, """text_input""": [prompt]} ).logits
lowerCAmelCase__ : Union[str, Any] = hf_model(**UpperCamelCase ).logits
else:
lowerCAmelCase__ : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": [prompt], """text_output""": ["""\n"""]} ).logits
lowerCAmelCase__ : Optional[Any] = tokenizer("""\n""" , return_tensors="""pt""" ).input_ids.to(UpperCamelCase )
lowerCAmelCase__ : Any = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -100 )
lowerCAmelCase__ : Any = hf_model(**UpperCamelCase , labels=UpperCamelCase ).logits
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
assert original_logits.shape == logits.shape
lowerCAmelCase__ : Any = 1e-4 if """vicuna""" in model_name else 1e-5
assert torch.allclose(original_logits.to(logits.device ) , UpperCamelCase , atol=UpperCamelCase )
print("""Looks ok!""" )
print("""Generating with original model...""" )
lowerCAmelCase__ : Any = original_model.generate({"""image""": original_pixel_values, """prompt""": prompt} , num_beams=5 )
# important: we need to cast the weights of the HF model to the appropriate type
print("""Generating with HF model...""" )
lowerCAmelCase__ : Optional[int] = hf_model.generate(
**UpperCamelCase , do_sample=UpperCamelCase , num_beams=5 , max_length=256 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , )
if "vicuna" in model_name:
# convert output id 0 to 2 (eos_token_id)
# TODO add this in the generate method?
lowerCAmelCase__ : Any = 2
print("""Original generation:""" , UpperCamelCase )
lowerCAmelCase__ : str = processor.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
lowerCAmelCase__ : str = [text.strip() for text in output_text]
print("""HF generation:""" , UpperCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(UpperCamelCase )
hf_model.save_pretrained(UpperCamelCase )
if push_to_hub:
processor.push_to_hub(f"""Salesforce/{model_name}""" )
hf_model.push_to_hub(f"""Salesforce/{model_name}""" )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
_lowerCAmelCase = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
        help='''Name of the model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
_lowerCAmelCase = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
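# Editor's sketch (standalone, not part of the conversion script above): loading a
# converted checkpoint. It assumes the script was run with --push_to_hub, so the weights
# live at Salesforce/<model_name>; point from_pretrained at --pytorch_dump_folder_path
# instead if you only saved locally. The demo-image URL mirrors the LAVIS
# "Confusing-Pictures" example and is an assumption here.
import requests
import torch
from PIL import Image
from transformers import InstructBlipForConditionalGeneration, InstructBlipProcessor

processor = InstructBlipProcessor.from_pretrained("Salesforce/instructblip-flan-t5-xl")
model = InstructBlipForConditionalGeneration.from_pretrained("Salesforce/instructblip-flan-t5-xl")

url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
inputs = processor(images=image, text="What is unusual about this image?", return_tensors="pt")
with torch.no_grad():
    generated_ids = model.generate(**inputs, num_beams=5, max_length=256)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())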
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_A = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class lowercase_ ( unittest.TestCase ):
A__ : List[str] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
A__ : Union[str, Any] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
A__ : Union[str, Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
A__ : List[Any] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = ZeroShotClassificationPipeline(
            model=__UpperCamelCase , tokenizer=__UpperCamelCase , candidate_labels=["""politics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def lowerCamelCase_ ( self , __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
# No kwarg
UpperCamelCase_ = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
UpperCamelCase_ = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
UpperCamelCase_ = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCamelCase_ = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
UpperCamelCase_ = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
UpperCamelCase_ = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(1 )
] , )
UpperCamelCase_ = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(2 )
] , )
with self.assertRaises(__UpperCamelCase ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(__UpperCamelCase ):
classifier(__UpperCamelCase , candidate_labels="""politics""" )
with self.assertRaises(__UpperCamelCase ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(__UpperCamelCase ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=__UpperCamelCase )
with self.assertRaises(__UpperCamelCase ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(__UpperCamelCase ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=__UpperCamelCase , )
self.run_entailment_id(__UpperCamelCase )
def lowerCamelCase_ ( self , __UpperCamelCase ):
"""simple docstring"""
UpperCamelCase_ = zero_shot_classifier.model.config
UpperCamelCase_ = config.labelaid
UpperCamelCase_ = zero_shot_classifier.entailment_id
UpperCamelCase_ = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
UpperCamelCase_ = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCamelCase_ = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
UpperCamelCase_ = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
UpperCamelCase_ = original_labelaid
self.assertEqual(__UpperCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 1_0_0 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
UpperCamelCase_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@require_tf
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
UpperCamelCase_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.333, 0.333, 0.333],
} , )
@slow
@require_torch
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
UpperCamelCase_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
UpperCamelCase_ = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
@slow
@require_tf
def lowerCamelCase_ ( self ):
"""simple docstring"""
UpperCamelCase_ = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
UpperCamelCase_ = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.976, 0.015, 0.009],
} , )
UpperCamelCase_ = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.817, 0.713, 0.018, 0.018],
} , )
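# Editor's sketch: the pipeline exercised by the tests above, in plain usage form.
# "facebook/bart-large-mnli" is one commonly used NLI checkpoint for this task (an
# assumption here; any NLI model with an entailment label works).
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
    hypothesis_template="This example is about {}.",
)
# labels come back sorted by score, highest first; scores sum to 1.0 unless multi_label=True
print(result["labels"][0], result["scores"][0])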
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
_SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {"vocab_file": "vocab.txt"}
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
_SCREAMING_SNAKE_CASE : List[str] = {
"YituTech/conv-bert-base": 512,
"YituTech/conv-bert-medium-small": 512,
"YituTech/conv-bert-small": 512,
}
_SCREAMING_SNAKE_CASE : Optional[Any] = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class _snake_case ( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=True , unk_token="[UNK]" , sep_token="[SEP]" , pad_token="[PAD]" , cls_token="[CLS]" , mask_token="[MASK]" , tokenize_chinese_chars=True , strip_accents=None , **kwargs , ) -> None:
        '''simple docstring'''
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , tokenize_chinese_chars=tokenize_chinese_chars , strip_accents=strip_accents , **kwargs , )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("lowercase" , do_lower_case ) != do_lower_case
            or normalizer_state.get("strip_accents" , strip_accents ) != strip_accents
            or normalizer_state.get("handle_chinese_chars" , tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers , normalizer_state.pop("type" ) )
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens( self , token_ids_a , token_ids_b=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self , token_ids_a , token_ids_b = None ) -> List[int]:
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
    def save_vocabulary( self , save_directory , filename_prefix = None ) -> Tuple[str]:
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
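# Editor's sketch: the special-token layout the two methods above produce.
from transformers import ConvBertTokenizerFast

tokenizer = ConvBertTokenizerFast.from_pretrained("YituTech/conv-bert-base")
encoding = tokenizer("first segment", "second segment")
# input_ids:      [CLS] first segment [SEP] second segment [SEP]
# token_type_ids: 0 for [CLS] + segment one + [SEP], then 1 for segment two + [SEP]
print(encoding["input_ids"])
print(encoding["token_type_ids"])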
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
_SCREAMING_SNAKE_CASE : Dict = logging.getLogger(__name__)
_SCREAMING_SNAKE_CASE : List[str] = {"facebook/bart-base": BartForConditionalGeneration}
_SCREAMING_SNAKE_CASE : Union[str, Any] = {"facebook/bart-base": BartTokenizer}
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=snake_case , default=snake_case , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=snake_case , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=snake_case , default=snake_case , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=snake_case , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case , )
parser.add_argument(
"--config_name" , type=snake_case , default=snake_case , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=snake_case , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=snake_case , default=snake_case , help="Where to store the final ONNX file." )
snake_case_ = parser.parse_args()
return args
def UpperCamelCase_( snake_case : Union[str, Any] , snake_case : List[str]="cpu" ):
'''simple docstring'''
snake_case_ = model_dict[model_name].from_pretrained(snake_case ).to(snake_case )
snake_case_ = tokenizer_dict[model_name].from_pretrained(snake_case )
if model_name in ["facebook/bart-base"]:
snake_case_ = 0
snake_case_ = None
snake_case_ = 0
return huggingface_model, tokenizer
def UpperCamelCase_( snake_case : int , snake_case : Tuple , snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : Optional[Any] ):
'''simple docstring'''
model.eval()
snake_case_ = None
snake_case_ = torch.jit.script(BARTBeamSearchGenerator(snake_case ) )
with torch.no_grad():
snake_case_ = "My friends are cool but they eat too many carbs."
snake_case_ = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="pt" ).to(model.device )
snake_case_ = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=snake_case , max_length=snake_case , early_stopping=snake_case , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
snake_case , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , snake_case , opset_version=1_4 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=snake_case , )
logger.info("Model exported to {}".format(snake_case ) )
snake_case_ = remove_dup_initializers(os.path.abspath(snake_case ) )
logger.info("Deduplicated and optimized model written to {}".format(snake_case ) )
snake_case_ = onnxruntime.InferenceSession(snake_case )
snake_case_ = ort_sess.run(
snake_case , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(snake_case ),
"max_length": np.array(snake_case ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
def UpperCamelCase_( ):
'''simple docstring'''
snake_case_ = parse_args()
snake_case_ = 5
snake_case_ = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
snake_case_ = torch.device(args.device )
snake_case_ , snake_case_ = load_model_tokenizer(args.model_name_or_path , snake_case )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(snake_case )
if args.max_length:
snake_case_ = args.max_length
if args.num_beams:
snake_case_ = args.num_beams
if args.output_file_path:
snake_case_ = args.output_file_path
else:
snake_case_ = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(snake_case , snake_case , snake_case , snake_case , snake_case )
if __name__ == "__main__":
main()
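# Editor's sketch: standalone inference against the exported graph, assuming the run
# above wrote "BART.onnx" (the default output path). It mirrors the validation inputs
# used in export_and_validate_model; the scalar inputs must be int64 as the graph expects.
import numpy as np
import onnxruntime
from transformers import BartTokenizer

tokenizer = BartTokenizer.from_pretrained("facebook/bart-base")
session = onnxruntime.InferenceSession("BART.onnx")
encoded = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="np")
output_ids = session.run(
    None,
    {
        "input_ids": encoded["input_ids"].astype(np.int64),
        "attention_mask": encoded["attention_mask"].astype(np.int64),
        "num_beams": np.array(4, dtype=np.int64),
        "max_length": np.array(5, dtype=np.int64),
        "decoder_start_token_id": np.array(2, dtype=np.int64),  # facebook/bart-base default
    },
)[0]
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True))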
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE :Dict = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
snake_case_ = AlbertTokenizer
snake_case_ = AlbertTokenizerFast
snake_case_ = True
snake_case_ = True
snake_case_ = True
def UpperCamelCase_ ( self : Union[str, Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
__A = AlbertTokenizer(A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Tuple ,A : List[str] ):
__A = "this is a test"
__A = "this is a test"
return input_text, output_text
def UpperCamelCase_ ( self : List[Any] ):
__A = "<pad>"
__A = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) ,A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) ,A )
def UpperCamelCase_ ( self : Optional[Any] ):
__A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,"<pad>" )
self.assertEqual(vocab_keys[1] ,"<unk>" )
self.assertEqual(vocab_keys[-1] ,"▁eloquent" )
self.assertEqual(len(A ) ,3_00_00 )
def UpperCamelCase_ ( self : str ):
self.assertEqual(self.get_tokenizer().vocab_size ,3_00_00 )
def UpperCamelCase_ ( self : str ):
if not self.test_rust_tokenizer:
return
__A = self.get_tokenizer()
__A = self.get_rust_tokenizer()
__A = "I was born in 92000, and this is falsé."
__A = tokenizer.tokenize(A )
__A = rust_tokenizer.tokenize(A )
self.assertListEqual(A ,A )
__A = tokenizer.encode(A ,add_special_tokens=A )
__A = rust_tokenizer.encode(A ,add_special_tokens=A )
self.assertListEqual(A ,A )
__A = self.get_rust_tokenizer()
__A = tokenizer.encode(A )
__A = rust_tokenizer.encode(A )
self.assertListEqual(A ,A )
def UpperCamelCase_ ( self : Any ):
__A = AlbertTokenizer(A ,keep_accents=A )
__A = tokenizer.tokenize("This is a test" )
self.assertListEqual(A ,["▁this", "▁is", "▁a", "▁test"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) ,[48, 25, 21, 12_89] )
__A = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
A ,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."] )
__A = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(A ,[31, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] )
__A = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A ,["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."] ,)
def UpperCamelCase_ ( self : List[str] ):
__A = AlbertTokenizer(A )
__A = tokenizer.encode("sequence builders" )
__A = tokenizer.encode("multi-sequence build" )
__A = tokenizer.build_inputs_with_special_tokens(A )
__A = tokenizer.build_inputs_with_special_tokens(A ,A )
assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [
tokenizer.sep_token_id
]
@slow
def UpperCamelCase_ ( self : int ):
# fmt: off
__A = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="albert-base-v2" ,revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" ,)
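# Editor's sketch: the SentencePiece behaviour the tests above assert, outside the harness.
from transformers import AlbertTokenizer

tokenizer = AlbertTokenizer.from_pretrained("albert-base-v2")
print(tokenizer.tokenize("This is a test"))  # ['▁this', '▁is', '▁a', '▁test'] (lowercased by default)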
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase_ : Any = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase_ : List[str] = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
lowerCAmelCase_ : Tuple = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def _lowerCamelCase ( lowercase : Any , lowercase : bool = False ) -> Optional[Any]:
with open(lowercase , "r" , encoding="utf-8" ) as f:
_a = f.read()
_a = content.split("\n" )
_a = []
_a = 0
while line_idx < len(lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_a = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_a = sorted(lowercase , key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(lowercase ) )
elif "\n".join(lowercase ) != content:
return True
def _lowerCamelCase ( lowercase : bool = False ) -> List[str]:
_a = [os.path.join(lowercase , lowercase ) for f in os.listdir(lowercase ) if f.endswith(".py" )]
_a = [sort_auto_mapping(lowercase , overwrite=lowercase ) for fname in fnames]
if not overwrite and any(lowercase ):
_a = [f for f, d in zip(lowercase , lowercase ) if d]
raise ValueError(
F'The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix'
" this." )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
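# Editor's note: what sort_auto_mapping enforces; entries inside each
# *_MAPPING_NAMES = OrderedDict([...]) block are kept alphabetical by their string
# identifier, e.g.
#
#     MODEL_MAPPING_NAMES = OrderedDict(
#         [
#             ("albert", "AlbertModel"),
#             ("bart", "BartModel"),
#             ("bert", "BertModel"),
#         ]
#     )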
from math import isqrt
def calculate_prime_numbers( max_number: int ) -> list[int]:
    '''simple docstring'''
    is_prime = [True] * max_number
    for i in range(2 , isqrt(max_number - 1 ) + 1 ):
        if is_prime[i]:
            for j in range(i**2 , max_number , i ):
                is_prime[j] = False
    return [i for i in range(2 , max_number ) if is_prime[i]]
def solution( max_number: int = 10**8 ) -> int:
    '''simple docstring'''
    prime_numbers = calculate_prime_numbers(max_number // 2 )
    semiprimes_count = 0
    left = 0
    right = len(prime_numbers ) - 1
    while left <= right:
        while prime_numbers[left] * prime_numbers[right] >= max_number:
            right -= 1
        semiprimes_count += right - left + 1
        left += 1
    return semiprimes_count
if __name__ == "__main__":
print(f'{solution() = }')
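    # Editor's sketch: sanity check on a small bound; the ten numbers below 30 with
    # exactly two prime factors are 4, 6, 9, 10, 14, 15, 21, 22, 25 and 26.
    assert solution(30) == 10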
from __future__ import annotations
from math import pi, sqrt
def resonant_frequency( inductance: float , capacitance: float ) -> tuple:
'''simple docstring'''
if inductance <= 0:
raise ValueError('''Inductance cannot be 0 or negative''' )
elif capacitance <= 0:
raise ValueError('''Capacitance cannot be 0 or negative''' )
else:
return (
"Resonant frequency",
float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
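    # Editor's sketch: f = 1 / (2 * pi * sqrt(L * C)); for L = 10 mH and C = 5 uF
    # this gives roughly 711.76 Hz.
    print(resonant_frequency(10e-3, 5e-6))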
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
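# Newton-Raphson update: x_{n+1} = x_n - f(x_n) / f'(x_n); func is a string expression
# in x, and sympy's diff supplies f' symbolically.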
def newton_raphson( func , a , precision = 10**-10 ) -> float:
    '''simple docstring'''
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}""")
# Find Square Root of 5
print(F"""The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}""")
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def A ( _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
_UpperCAmelCase = multiprocessing.Manager()
_UpperCAmelCase = manager.list()
_UpperCAmelCase = multiprocessing.Process(target=_UpperCAmelCase , args=(check_program, result, timeout) )
p.start()
p.join(timeout=timeout + 1 )
if p.is_alive():
p.kill()
if not result:
result.append('timed out' )
return {
"task_id": task_id,
"passed": result[0] == "passed",
"result": result[0],
"completion_id": completion_id,
}
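# Editor's note: a hedged usage sketch. In OpenAI's human-eval this entry point is named
# check_correctness and is called once per generated sample, e.g.
#   result = check_correctness("assert 1 + 1 == 2", 3.0, "task_0", 0)
#   result["passed"]  # True iff the program ran to completion inside the timeout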
def A ( _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
with create_tempdir():
# These system calls are needed when cleaning up tempdir.
import os
import shutil
_UpperCAmelCase = shutil.rmtree
_UpperCAmelCase = os.rmdir
_UpperCAmelCase = os.chdir
# Disable functionalities that can make destructive changes to the test.
reliability_guard()
# Run program.
try:
_UpperCAmelCase = {}
with swallow_io():
with time_limit(_UpperCAmelCase ):
exec(_UpperCAmelCase , _UpperCAmelCase )
result.append('passed' )
except TimeoutException:
result.append('timed out' )
except BaseException as e:
result.append(F"failed: {e}" )
# Needed for cleaning up.
_UpperCAmelCase = rmtree
_UpperCAmelCase = rmdir
_UpperCAmelCase = chdir
@contextlib.contextmanager
def A ( _UpperCAmelCase : Union[str, Any] ) -> Any:
'''simple docstring'''
def signal_handler(_UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict ):
raise TimeoutException('Timed out!' )
signal.setitimer(signal.ITIMER_REAL , _UpperCAmelCase )
signal.signal(signal.SIGALRM , _UpperCAmelCase )
try:
yield
finally:
signal.setitimer(signal.ITIMER_REAL , 0 )
@contextlib.contextmanager
def A ( ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = WriteOnlyStringIO()
with contextlib.redirect_stdout(_UpperCAmelCase ):
with contextlib.redirect_stderr(_UpperCAmelCase ):
with redirect_stdin(_UpperCAmelCase ):
yield
@contextlib.contextmanager
def A ( ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as dirname:
with chdir(_UpperCAmelCase ):
yield dirname
class __lowerCAmelCase ( A ):
pass
class __lowerCAmelCase ( io.StringIO ):
def _lowerCamelCase ( self : Tuple , *A : str , **A : Any) -> Any:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : List[str] , *A : Optional[Any] , **A : Optional[Any]) -> Optional[int]:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : str , *A : List[str] , **A : List[Any]) -> Union[str, Any]:
"""simple docstring"""
raise OSError
def _lowerCamelCase ( self : Union[str, Any] , *A : Optional[Any] , **A : List[str]) -> Optional[int]:
"""simple docstring"""
return False
class __lowerCAmelCase ( contextlib._RedirectStream ): # type: ignore
UpperCamelCase = '''stdin'''
@contextlib.contextmanager
def A ( _UpperCAmelCase : List[Any] ) -> Dict:
'''simple docstring'''
if root == ".":
yield
return
_UpperCAmelCase = os.getcwd()
os.chdir(_UpperCAmelCase )
try:
yield
except BaseException as exc:
raise exc
finally:
os.chdir(_UpperCAmelCase )
def A ( _UpperCAmelCase : List[str]=None ) -> Any:
'''simple docstring'''
if maximum_memory_bytes is not None:
import resource
resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) )
resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) )
if not platform.uname().system == "Darwin":
resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) )
faulthandler.disable()
import builtins
_UpperCAmelCase = None
_UpperCAmelCase = None
import os
_UpperCAmelCase = '1'
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import shutil
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
import subprocess
_UpperCAmelCase = None # type: ignore
_UpperCAmelCase = None
import sys
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
"""simple docstring"""
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a__ : int = logging.get_logger(__name__)
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = to_pil_image(lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = pil_image.size
__SCREAMING_SNAKE_CASE = pytesseract.image_to_data(lowerCAmelCase_ , lang=lowerCAmelCase_ , output_type="dict" , config=lowerCAmelCase_ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = data["text"], data["left"], data["top"], data["width"], data["height"]
# filter empty words and corresponding coordinates
__SCREAMING_SNAKE_CASE = [idx for idx, word in enumerate(lowerCAmelCase_ ) if not word.strip()]
__SCREAMING_SNAKE_CASE = [word for idx, word in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
__SCREAMING_SNAKE_CASE = [coord for idx, coord in enumerate(lowerCAmelCase_ ) if idx not in irrelevant_indices]
# turn coordinates into (left, top, left+width, top+height) format
__SCREAMING_SNAKE_CASE = []
for x, y, w, h in zip(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
__SCREAMING_SNAKE_CASE = [x, y, x + w, y + h]
actual_boxes.append(lowerCAmelCase_ )
# finally, normalize the bounding boxes
__SCREAMING_SNAKE_CASE = []
for box in actual_boxes:
normalized_boxes.append(normalize_box(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), "Not as many words as there are bounding boxes"
return words, normalized_boxes
class UpperCamelCase_ ( UpperCamelCase):
"""simple docstring"""
snake_case__ : Optional[Any] = ["pixel_values"]
def __init__( self : Dict , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : float = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = "" , **UpperCAmelCase__ : Optional[Any] , ) -> None:
super().__init__(**UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = size if size is not None else {"height": 2_2_4, "width": 2_2_4}
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_value
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
__SCREAMING_SNAKE_CASE = apply_ocr
__SCREAMING_SNAKE_CASE = ocr_lang
__SCREAMING_SNAKE_CASE = tesseract_config
def UpperCAmelCase_ ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> np.ndarray:
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = (size["height"], size["width"])
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Optional[Any] , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ) -> np.ndarray:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, Iterable[float]] , UpperCAmelCase__ : Union[float, Iterable[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) -> np.ndarray:
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def UpperCAmelCase_ ( self : Union[str, Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : float = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : Union[float, Iterable[float]] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[str] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : List[Any] , ) -> PIL.Image.Image:
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(UpperCAmelCase__ )
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
__SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
__SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
__SCREAMING_SNAKE_CASE = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("If do_normalize is True, image_mean and image_std must be specified." )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(UpperCAmelCase__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , "pytesseract" )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for image in images:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = apply_tesseract(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
words_batch.append(UpperCAmelCase__ )
boxes_batch.append(UpperCAmelCase__ )
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
__SCREAMING_SNAKE_CASE = BatchFeature(data={"pixel_values": images} , tensor_type=UpperCAmelCase__ )
if apply_ocr:
__SCREAMING_SNAKE_CASE = words_batch
__SCREAMING_SNAKE_CASE = boxes_batch
return data
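# Editor's sketch: typical use of the processor above. The apply_ocr/pytesseract logic
# matches LayoutLMv2's image processor, so LayoutLMv2ImageProcessor is assumed here;
# "document.png" is a placeholder path and pytesseract must be installed for OCR.
from PIL import Image
from transformers import LayoutLMv2ImageProcessor

image_processor = LayoutLMv2ImageProcessor()  # apply_ocr=True by default
image = Image.open("document.png").convert("RGB")
encoding = image_processor(image, return_tensors="pt")
print(encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
print(encoding.words[0][:5], encoding.boxes[0][:5])  # OCR words + normalized boxes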
"""simple docstring"""
from __future__ import annotations
def print_distance(distance: list[float], src):
    '''simple docstring'''
    print(f"""Vertex\tShortest Distance from vertex {src}""" )
    for i, d in enumerate(distance ):
        print(f"""{i}\t\t{d}""" )
def check_negative_cycle(graph: list[dict[str, int]], distance: list[float], edge_count: int):
    '''simple docstring'''
    for j in range(edge_count ):
        u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
        if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
            return True
    return False
def bellman_ford(graph: list[dict[str, int]], vertex_count: int, edge_count: int, src: int) -> list[float]:
    '''simple docstring'''
    distance = [float("inf" )] * vertex_count
    distance[src] = 0.0
    for _ in range(vertex_count - 1 ):
        for j in range(edge_count ):
            u, v, w = (graph[j][k] for k in ["src", "dst", "weight"])
            if distance[u] != float("inf" ) and distance[u] + w < distance[v]:
                distance[v] = distance[u] + w
    negative_cycle_exists = check_negative_cycle(graph, distance, edge_count )
    if negative_cycle_exists:
        raise Exception("Negative cycle found" )
    return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
a__ : Union[str, Any] = int(input('''Enter number of vertices: ''').strip())
a__ : Any = int(input('''Enter number of edges: ''').strip())
a__ : list[dict[str, int]] = [{} for _ in range(E)]
for i in range(E):
print('''Edge ''', i + 1)
a__ , a__ , a__ : str = (
int(x)
for x in input('''Enter source, destination, weight: ''').strip().split(''' ''')
)
a__ : str = {'''src''': src, '''dst''': dest, '''weight''': weight}
a__ : str = int(input('''\nEnter shortest path source:''').strip())
a__ : List[Any] = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
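    # Worked example (hand-checked, not part of the original script): for
    #   graph = [{"src": 0, "dst": 1, "weight": 2}, {"src": 1, "dst": 2, "weight": -1}]
    # bellman_ford(graph, vertex_count=3, edge_count=2, src=0) returns [0.0, 2.0, 1.0];
    # adding {"src": 2, "dst": 0, "weight": -3} closes a cycle of total weight -2,
    # so check_negative_cycle fires and the call raises "Negative cycle found".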
| 195
| 1
|
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import VideoMAEConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEModel,
)
from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class VideoMAEModelTester:
"""simple docstring"""
def __init__( self : List[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[int]=13 , lowerCAmelCase__ : Optional[int]=10 , lowerCAmelCase__ : Tuple=3 , lowerCAmelCase__ : Tuple=2 , lowerCAmelCase__ : Union[str, Any]=2 , lowerCAmelCase__ : Any=2 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Dict=32 , lowerCAmelCase__ : Any=5 , lowerCAmelCase__ : Any=4 , lowerCAmelCase__ : Optional[Any]=37 , lowerCAmelCase__ : str="gelu" , lowerCAmelCase__ : Tuple=0.1 , lowerCAmelCase__ : List[str]=0.1 , lowerCAmelCase__ : Optional[Any]=10 , lowerCAmelCase__ : Tuple=0.02 , lowerCAmelCase__ : str=0.9 , lowerCAmelCase__ : int=None , ):
SCREAMING_SNAKE_CASE_: str = parent
SCREAMING_SNAKE_CASE_: int = batch_size
SCREAMING_SNAKE_CASE_: Dict = image_size
SCREAMING_SNAKE_CASE_: Any = num_channels
SCREAMING_SNAKE_CASE_: int = patch_size
SCREAMING_SNAKE_CASE_: int = tubelet_size
SCREAMING_SNAKE_CASE_: Optional[int] = num_frames
SCREAMING_SNAKE_CASE_: int = is_training
SCREAMING_SNAKE_CASE_: Dict = use_labels
SCREAMING_SNAKE_CASE_: Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE_: List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_: Optional[Any] = num_attention_heads
SCREAMING_SNAKE_CASE_: str = intermediate_size
SCREAMING_SNAKE_CASE_: str = hidden_act
SCREAMING_SNAKE_CASE_: List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_: Optional[int] = type_sequence_label_size
SCREAMING_SNAKE_CASE_: Dict = initializer_range
SCREAMING_SNAKE_CASE_: Any = mask_ratio
SCREAMING_SNAKE_CASE_: Dict = scope
# in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame
SCREAMING_SNAKE_CASE_: Any = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_: Dict = (num_frames // tubelet_size) * self.num_patches_per_frame
# use this variable to define bool_masked_pos
SCREAMING_SNAKE_CASE_: Dict = int(mask_ratio * self.seq_length)
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: Union[str, Any] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_: Optional[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
SCREAMING_SNAKE_CASE_: Tuple = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : int):
return VideoMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase__ , initializer_range=self.initializer_range , )
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : Optional[Any]):
SCREAMING_SNAKE_CASE_: Optional[Any] = VideoMAEModel(config=lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
SCREAMING_SNAKE_CASE_: Union[str, Any] = model(lowerCAmelCase__)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def _SCREAMING_SNAKE_CASE ( self : str , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[Any] = VideoMAEForPreTraining(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE_: Dict = torch.ones((self.num_masks,))
SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0))])
SCREAMING_SNAKE_CASE_: List[str] = mask.expand(self.batch_size , -1).bool()
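        # Shape check (hand-derived from the tester defaults above, not in the original):
        # image_size=10, patch_size=2, num_frames=2, tubelet_size=2 give
        # (10 // 2) ** 2 * (2 // 2) = 25 tokens, and mask_ratio=0.9 gives int(0.9 * 25) = 22
        # masked positions, so `mask` here is 22 ones followed by 3 zeros before the expand.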
SCREAMING_SNAKE_CASE_: List[Any] = model(lowerCAmelCase__ , lowerCAmelCase__)
# model only returns predictions for masked patches
SCREAMING_SNAKE_CASE_: Optional[int] = mask.sum().item()
SCREAMING_SNAKE_CASE_: str = 3 * self.tubelet_size * self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels))
def _SCREAMING_SNAKE_CASE ( self : str):
SCREAMING_SNAKE_CASE_: Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = config_and_inputs
SCREAMING_SNAKE_CASE_: int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowercase ( UpperCAmelCase_ , UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = (
(VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else ()
)
_UpperCAmelCase : Dict = (
{'''feature-extraction''': VideoMAEModel, '''video-classification''': VideoMAEForVideoClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : Optional[int] = False
_UpperCAmelCase : List[Any] = False
_UpperCAmelCase : List[str] = False
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: List[Any] = VideoMAEModelTester(self)
SCREAMING_SNAKE_CASE_: Union[str, Any] = ConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowerCAmelCase__ : int , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Dict=False):
SCREAMING_SNAKE_CASE_: Any = copy.deepcopy(lowerCAmelCase__)
if model_class == VideoMAEForPreTraining:
# important: each video needs to have the same number of masked patches
# hence we define a single mask, which we then repeat for each example in the batch
SCREAMING_SNAKE_CASE_: List[str] = torch.ones((self.model_tester.num_masks,))
SCREAMING_SNAKE_CASE_: List[str] = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0))])
SCREAMING_SNAKE_CASE_: Dict = mask.expand(self.model_tester.batch_size , -1).bool()
SCREAMING_SNAKE_CASE_: Union[str, Any] = bool_masked_pos.to(lowerCAmelCase__)
if return_labels:
if model_class in [
*get_values(lowerCAmelCase__),
]:
SCREAMING_SNAKE_CASE_: Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase__)
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int):
self.config_tester.run_common_tests()
@unittest.skip(reason="VideoMAE does not use inputs_embeds")
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: Tuple = model_class(lowerCAmelCase__)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
SCREAMING_SNAKE_CASE_: Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase__ , nn.Linear))
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int = model_class(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_: Optional[int] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_: int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_: Dict = VideoMAEModel.from_pretrained(lowerCAmelCase__)
self.assertIsNotNone(lowerCAmelCase__)
def _SCREAMING_SNAKE_CASE ( self : Tuple):
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_: Union[str, Any] = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: int = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE_: Optional[Any] = (
num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
)
SCREAMING_SNAKE_CASE_: Union[str, Any] = True
SCREAMING_SNAKE_CASE_: Optional[int] = False
SCREAMING_SNAKE_CASE_: Optional[Any] = True
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Any = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: List[str] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_: Dict = True
SCREAMING_SNAKE_CASE_: Union[str, Any] = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Union[str, Any] = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
SCREAMING_SNAKE_CASE_: Union[str, Any] = len(lowerCAmelCase__)
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_: Any = True
SCREAMING_SNAKE_CASE_: Tuple = True
SCREAMING_SNAKE_CASE_: List[str] = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Dict = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
self.assertEqual(out_len + 1 , len(lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: str = outputs.attentions
self.assertEqual(len(lowerCAmelCase__) , self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _SCREAMING_SNAKE_CASE ( self : Any):
def check_hidden_states_output(lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = model_class(lowerCAmelCase__)
model.to(lowerCAmelCase__)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__))
SCREAMING_SNAKE_CASE_: Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE_: Dict = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(lowerCAmelCase__) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = self.model_tester.seq_length - self.model_tester.num_masks
SCREAMING_SNAKE_CASE_: Union[str, Any] = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_: List[str] = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_: Dict = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
def _SCREAMING_SNAKE_CASE ( self : List[str]):
pass
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset")
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5])
if is_vision_available()
else None
)
@slow
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = VideoMAEForVideoClassification.from_pretrained("MCG-NJU/videomae-base-finetuned-kinetics").to(
lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE_: int = prepare_video()
SCREAMING_SNAKE_CASE_: Tuple = image_processor(lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Union[str, Any] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: str = torch.Size((1, 400))
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = torch.tensor([0.3669, -0.0688, -0.2421]).to(lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4))
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Union[str, Any] = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short").to(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = self.default_image_processor
SCREAMING_SNAKE_CASE_: Optional[int] = prepare_video()
SCREAMING_SNAKE_CASE_: Optional[int] = image_processor(lowerCAmelCase__ , return_tensors="pt").to(lowerCAmelCase__)
# add boolean mask, indicating which patches to mask
SCREAMING_SNAKE_CASE_: List[Any] = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos" , filename="bool_masked_pos.pt")
SCREAMING_SNAKE_CASE_: str = torch.load(lowerCAmelCase__)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Optional[int] = model(**lowerCAmelCase__)
# verify the logits
SCREAMING_SNAKE_CASE_: Optional[int] = torch.Size([1, 1408, 1536])
SCREAMING_SNAKE_CASE_: Optional[int] = torch.tensor(
[[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]] , device=lowerCAmelCase__)
self.assertEqual(outputs.logits.shape , lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase__ , atol=1E-4))
# verify the loss (`config.norm_pix_loss` = `True`)
SCREAMING_SNAKE_CASE_: str = torch.tensor([0.5142] , device=lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase__ , atol=1E-4))
# verify the loss (`config.norm_pix_loss` = `False`)
SCREAMING_SNAKE_CASE_: int = VideoMAEForPreTraining.from_pretrained("MCG-NJU/videomae-base-short" , norm_pix_loss=lowerCAmelCase__).to(
lowerCAmelCase__)
with torch.no_grad():
SCREAMING_SNAKE_CASE_: Any = model(**lowerCAmelCase__)
        SCREAMING_SNAKE_CASE_: Union[str, Any] = torch.tensor([0.6469], device=lowerCAmelCase__)
self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase__ , atol=1E-4))
| 13
|
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class __lowercase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase__ : int , lowerCAmelCase__ : int):
SCREAMING_SNAKE_CASE_: str = jnp.ones((batch_size, length)) / length
return scores
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: Dict = None
SCREAMING_SNAKE_CASE_: str = 20
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(batch_size=2 , length=lowerCAmelCase__)
# tweak scores to not be uniform anymore
SCREAMING_SNAKE_CASE_: List[str] = scores.at[1, 5].set((1 / length) + 0.1) # peak, 1st batch
SCREAMING_SNAKE_CASE_: Any = scores.at[1, 10].set((1 / length) - 0.4) # valley, 1st batch
# compute softmax
SCREAMING_SNAKE_CASE_: Dict = jax.nn.softmax(lowerCAmelCase__ , axis=-1)
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: List[str] = FlaxTemperatureLogitsWarper(temperature=1.3)
SCREAMING_SNAKE_CASE_: str = jax.nn.softmax(temp_dist_warper_sharper(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
SCREAMING_SNAKE_CASE_: int = jax.nn.softmax(temp_dist_warper_smoother(lowerCAmelCase__ , scores.copy() , cur_len=lowerCAmelCase__) , axis=-1)
# uniform distribution stays uniform
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3))
self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3))
# sharp peaks get higher, valleys get lower
self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max())
self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min())
# smooth peaks get lower, valleys get higher
self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max())
self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: List[str] = None
SCREAMING_SNAKE_CASE_: str = 10
SCREAMING_SNAKE_CASE_: Tuple = 2
# create ramp distribution
SCREAMING_SNAKE_CASE_: Optional[Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy()
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1:, : vocab_size // 2] + vocab_size
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# check that correct tokens are filtered
self.assertListEqual(jnp.isinf(scores[0]).tolist() , 7 * [True] + 3 * [False])
self.assertListEqual(jnp.isinf(scores[1]).tolist() , 2 * [True] + 3 * [False] + 5 * [True])
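        # Worked check (not in the original test; assumes the shifted slice above is
        # written back in place, as in the upstream test): row 0 of the ramp is
        # [0, 1, ..., 9], so top-k=3 keeps indices 7, 8, 9; row 1 has its first
        # vocab_size // 2 entries shifted up by 10, so its top-3 sit at indices 2, 3, 4,
        # matching the two isinf patterns asserted above.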
# check special case
SCREAMING_SNAKE_CASE_: Any = 5
SCREAMING_SNAKE_CASE_: str = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3)
SCREAMING_SNAKE_CASE_: Any = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, length)).copy()
SCREAMING_SNAKE_CASE_: Any = top_k_warp_safety_check(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
self.assertListEqual((scores == 0.0).sum(axis=-1).tolist() , [2, 2])
def _SCREAMING_SNAKE_CASE ( self : int):
SCREAMING_SNAKE_CASE_: Tuple = None
SCREAMING_SNAKE_CASE_: Dict = 10
SCREAMING_SNAKE_CASE_: Dict = 2
# create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
SCREAMING_SNAKE_CASE_: Tuple = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
SCREAMING_SNAKE_CASE_: int = FlaxTopPLogitsWarper(0.8)
SCREAMING_SNAKE_CASE_: Optional[Any] = np.exp(top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__))
# dist should be filtered to keep min num values so that sum is >= top_p
# exp (-inf) => 0
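        # Worked check (hand-computed, not in the original): batch 0 sorted descending is
        # [0.5, 0.3, 0.1, 0.1] with cumulative sums 0.5, 0.8, ..., so two tokens already
        # reach top_p=0.8; batch 1 needs 0.3 + 0.3 + 0.25 = 0.85 >= 0.8, hence three
        # survivors, which is exactly the expected array below.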
SCREAMING_SNAKE_CASE_: Dict = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# check edge cases with negative and extreme logits
SCREAMING_SNAKE_CASE_: Union[str, Any] = np.broadcast_to(np.arange(lowerCAmelCase__)[None, :] , (batch_size, vocab_size)).copy() - (
vocab_size // 2
)
# make ramp_logits more extreme
SCREAMING_SNAKE_CASE_: Dict = ramp_logits[1] * 100.0
# make sure at least 2 tokens are kept
SCREAMING_SNAKE_CASE_: str = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist() , [3, 2])
def _SCREAMING_SNAKE_CASE ( self : Tuple):
SCREAMING_SNAKE_CASE_: Tuple = 20
SCREAMING_SNAKE_CASE_: List[str] = 4
SCREAMING_SNAKE_CASE_: Optional[int] = 0
SCREAMING_SNAKE_CASE_: str = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
# check that min length is applied at length 5
SCREAMING_SNAKE_CASE_: str = ids_tensor((batch_size, 20) , vocab_size=20)
SCREAMING_SNAKE_CASE_: int = 5
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf")])
# check that min length is not applied anymore at length 15
SCREAMING_SNAKE_CASE_: List[str] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = 15
SCREAMING_SNAKE_CASE_: Any = min_dist_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
SCREAMING_SNAKE_CASE_: int = 20
SCREAMING_SNAKE_CASE_: str = 4
SCREAMING_SNAKE_CASE_: List[Any] = 0
SCREAMING_SNAKE_CASE_: Optional[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the bos_token_id score
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, 1) , vocab_size=20)
SCREAMING_SNAKE_CASE_: List[str] = 1
SCREAMING_SNAKE_CASE_: Union[str, Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
# check that bos_token_id is not forced if current length is greater than 1
SCREAMING_SNAKE_CASE_: List[Any] = 3
SCREAMING_SNAKE_CASE_: Optional[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : List[str]):
SCREAMING_SNAKE_CASE_: Any = 20
SCREAMING_SNAKE_CASE_: Optional[Any] = 4
SCREAMING_SNAKE_CASE_: Dict = 0
SCREAMING_SNAKE_CASE_: List[Any] = 5
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
# check that all scores are -inf except the eos_token_id when max_length is reached
SCREAMING_SNAKE_CASE_: List[Any] = ids_tensor((batch_size, 4) , vocab_size=20)
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: Dict = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0]) # score for eos_token_id should be zero
# check that eos_token_id is not forced if max_length is not reached
SCREAMING_SNAKE_CASE_: List[str] = 3
SCREAMING_SNAKE_CASE_: str = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = logits_processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
self.assertFalse(jnp.isinf(lowerCAmelCase__).any())
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
SCREAMING_SNAKE_CASE_: int = 4
SCREAMING_SNAKE_CASE_: List[Any] = 10
SCREAMING_SNAKE_CASE_: int = 15
SCREAMING_SNAKE_CASE_: Dict = 2
SCREAMING_SNAKE_CASE_: int = 1
SCREAMING_SNAKE_CASE_: List[Any] = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: int = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Tuple = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: Optional[int] = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
SCREAMING_SNAKE_CASE_: Dict = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Any = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: int = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Tuple = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# with processor list
SCREAMING_SNAKE_CASE_: str = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Tuple = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
def _SCREAMING_SNAKE_CASE ( self : Any):
SCREAMING_SNAKE_CASE_: Optional[int] = 4
SCREAMING_SNAKE_CASE_: int = 10
SCREAMING_SNAKE_CASE_: List[str] = 15
SCREAMING_SNAKE_CASE_: List[Any] = 2
SCREAMING_SNAKE_CASE_: Union[str, Any] = 1
SCREAMING_SNAKE_CASE_: str = 15
# dummy input_ids and scores
SCREAMING_SNAKE_CASE_: Tuple = ids_tensor((batch_size, sequence_length) , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = input_ids.copy()
SCREAMING_SNAKE_CASE_: List[Any] = self._get_uniform_logits(lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = scores.copy()
# instantiate all dist processors
SCREAMING_SNAKE_CASE_: Dict = FlaxTemperatureLogitsWarper(temperature=0.5)
SCREAMING_SNAKE_CASE_: Union[str, Any] = FlaxTopKLogitsWarper(3)
SCREAMING_SNAKE_CASE_: Dict = FlaxTopPLogitsWarper(0.8)
# instantiate all logits processors
SCREAMING_SNAKE_CASE_: int = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[str] = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: str = 10
# no processor list
def run_no_processor_list(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Dict):
SCREAMING_SNAKE_CASE_: Any = temp_dist_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_k_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = top_p_warp(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = min_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Union[str, Any] = bos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = eos_dist_proc(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
# with processor list
def run_processor_list(lowerCAmelCase__ : int , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Union[str, Any]):
SCREAMING_SNAKE_CASE_: List[str] = FlaxLogitsProcessorList(
[temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc])
SCREAMING_SNAKE_CASE_: Dict = processor(lowerCAmelCase__ , lowerCAmelCase__ , cur_len=lowerCAmelCase__)
return scores
SCREAMING_SNAKE_CASE_: str = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Optional[int] = jax.jit(lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: Dict = jitted_run_no_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
SCREAMING_SNAKE_CASE_: List[Any] = jitted_run_processor_list(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
# scores should be equal
self.assertTrue(jnp.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3))
# input_ids should never be changed
self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist())
| 13
| 1
|
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    """Warn about deprecated (attribute, version, message) tuples and optionally pop
    deprecated keys from `take_from`."""
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}")
        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."
        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)
    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")
    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
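# Hedged usage sketch (assumed calling convention, not part of the original file):
#   value = deprecate("old_arg", "1.0.0", "Use `new_arg` instead.", take_from=kwargs)
# pops `old_arg` from kwargs and returns its value while emitting a FutureWarning;
# with take_from=None it only warns, and leftover keys in take_from raise a TypeError.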
| 359
|
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowercase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__lowercase = 250004
__lowercase = 250020
@require_sentencepiece
@require_tokenizers
class _A ( _a ,unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : Optional[Any] = MBartTokenizer
UpperCAmelCase : Dict = MBartTokenizerFast
UpperCAmelCase : Optional[Any] = True
UpperCAmelCase : str = True
def __snake_case ( self : List[Any]):
super().setUp()
# We have a SentencePiece fixture for testing
a : Any = MBartTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def __snake_case ( self : List[Any]):
a : str = MBartTokenizer(__UpperCAmelCase , keep_accents=__UpperCAmelCase)
a : Union[str, Any] = tokenizer.tokenize("This is a test")
self.assertListEqual(__UpperCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__UpperCAmelCase) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
a : int = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
a : Optional[Any] = tokenizer.convert_tokens_to_ids(__UpperCAmelCase)
self.assertListEqual(
__UpperCAmelCase , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
# ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^
] , )
a : Tuple = tokenizer.convert_ids_to_tokens(__UpperCAmelCase)
self.assertListEqual(
__UpperCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def __snake_case ( self : Any):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a : Optional[Any] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
a : Dict = self.rust_tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase)
a : List[Any] = self.tokenizer_class.from_pretrained(__UpperCAmelCase , **__UpperCAmelCase)
a : Optional[int] = tempfile.mkdtemp()
a : Optional[int] = tokenizer_r.save_pretrained(__UpperCAmelCase)
a : List[Any] = tokenizer_p.save_pretrained(__UpperCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
a : Optional[int] = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase)
# Checks everything loads correctly in the same way
a : Dict = tokenizer_r.from_pretrained(__UpperCAmelCase)
a : int = tokenizer_p.from_pretrained(__UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(__UpperCAmelCase)
# Save tokenizer rust, legacy_format=True
a : Optional[Any] = tempfile.mkdtemp()
a : Tuple = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase)
a : List[str] = tokenizer_p.save_pretrained(__UpperCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(__UpperCAmelCase , __UpperCAmelCase)
# Checks everything loads correctly in the same way
a : Dict = tokenizer_r.from_pretrained(__UpperCAmelCase)
a : Optional[Any] = tokenizer_p.from_pretrained(__UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
shutil.rmtree(__UpperCAmelCase)
# Save tokenizer rust, legacy_format=False
a : Dict = tempfile.mkdtemp()
a : List[str] = tokenizer_r.save_pretrained(__UpperCAmelCase , legacy_format=__UpperCAmelCase)
a : Optional[int] = tokenizer_p.save_pretrained(__UpperCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
a : Any = tokenizer_r.from_pretrained(__UpperCAmelCase)
a : Any = tokenizer_p.from_pretrained(__UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(__UpperCAmelCase , __UpperCAmelCase))
shutil.rmtree(__UpperCAmelCase)
@require_torch
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : List[str] = """facebook/mbart-large-en-ro"""
UpperCAmelCase : Optional[Any] = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
UpperCAmelCase : str = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
UpperCAmelCase : str = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE]
@classmethod
def __snake_case ( cls : str):
a : MBartTokenizer = MBartTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="en_XX" , tgt_lang="ro_RO")
a : Optional[int] = 1
return cls
def __snake_case ( self : Optional[Any]):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"] , 250001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"] , 250004)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"] , 250020)
def __snake_case ( self : Union[str, Any]):
a : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase)
def __snake_case ( self : int):
self.assertIn(__UpperCAmelCase , self.tokenizer.all_special_ids)
a : Tuple = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
a : List[Any] = self.tokenizer.decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase)
a : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__UpperCAmelCase)
self.assertEqual(__UpperCAmelCase , __UpperCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , __UpperCAmelCase)
def __snake_case ( self : List[str]):
a : List[Any] = ["this is gunna be a long sentence " * 20]
assert isinstance(src_text[0] , __UpperCAmelCase)
a : Tuple = 10
a : Dict = self.tokenizer(__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase).input_ids[0]
self.assertEqual(ids[-2] , 2)
self.assertEqual(ids[-1] , __UpperCAmelCase)
self.assertEqual(len(__UpperCAmelCase) , __UpperCAmelCase)
def __snake_case ( self : List[str]):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [250026, 250001])
def __snake_case ( self : str):
a : List[str] = tempfile.mkdtemp()
a : Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__UpperCAmelCase)
a : Dict = MBartTokenizer.from_pretrained(__UpperCAmelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __UpperCAmelCase)
@require_torch
def __snake_case ( self : Dict):
a : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , return_tensors="pt")
a : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
assert batch.decoder_input_ids[1][-1] == 2
assert batch.labels[1][-2:].tolist() == [2, RO_CODE]
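        # Sketch of the convention asserted above (hand-written summary, not in the
        # original test): mbart's shift_tokens_right rotates the trailing language-id
        # token of each label row to position 0, so labels [..., 2, RO_CODE] become
        # decoder inputs [RO_CODE, ..., 2].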
@require_torch
def __snake_case ( self : List[Any]):
a : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
a : Dict = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id)
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase)
self.assertEqual((2, 14) , batch.input_ids.shape)
self.assertEqual((2, 14) , batch.attention_mask.shape)
a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __UpperCAmelCase)
self.assertEqual(2 , batch.decoder_input_ids[0, -1]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE])
def __snake_case ( self : Optional[int]):
a : List[str] = self.tokenizer(self.src_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=3 , return_tensors="pt")
a : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=10 , return_tensors="pt")
a : str = targets["input_ids"]
a : Optional[Any] = shift_tokens_right(__UpperCAmelCase , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def __snake_case ( self : Any):
a : Tuple = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="ar_AR")
self.assertEqual(
nested_simplify(__UpperCAmelCase) , {
# A, test, EOS, en_XX
"input_ids": [[62, 3034, 2, 250004]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 250001,
} , )
| 40
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_tf_available():
import tensorflow as tf
UpperCamelCase__ = logging.get_logger(__name__)
@dataclass
class a__ ( snake_case__ ):
_a : List[str] = [
"""no_inference""",
"""no_cuda""",
"""no_tpu""",
"""no_speed""",
"""no_memory""",
"""no_env_print""",
"""no_multi_process""",
]
def __init__( self , **_A ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
__lowerCAmelCase = deprecated_arg[3:]
__lowerCAmelCase = not kwargs.pop(_A )
logger.warning(
f"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"""
f""" {positive_arg}={kwargs[positive_arg]}""" )
__lowerCAmelCase = kwargs.pop("tpu_name" , self.tpu_name )
__lowerCAmelCase = kwargs.pop("device_idx" , self.device_idx )
__lowerCAmelCase = kwargs.pop("eager_mode" , self.eager_mode )
__lowerCAmelCase = kwargs.pop("use_xla" , self.use_xla )
super().__init__(**_A )
_a : str = field(
default=snake_case__ , metadata={"""help""": """Name of TPU"""} , )
_a : int = field(
default=0 , metadata={"""help""": """CPU / GPU device index. Defaults to 0."""} , )
    _a : bool = field(default=snake_case__ , metadata={"""help""": """Benchmark models in eager mode."""} )
    _a : bool = field(
        default=snake_case__ , metadata={
            """help""": """Benchmark models using XLA JIT compilation. Note that `eager_mode` has to be set to `False`."""
        } , )
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
__lowerCAmelCase = None
if self.tpu:
try:
if self.tpu_name:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name )
else:
__lowerCAmelCase = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
__lowerCAmelCase = None
return tpu
@cached_property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
if self.is_tpu:
tf.config.experimental_connect_to_cluster(self._setup_tpu )
tf.tpu.experimental.initialize_tpu_system(self._setup_tpu )
__lowerCAmelCase = tf.distribute.TPUStrategy(self._setup_tpu )
else:
# currently no multi gpu is allowed
if self.is_gpu:
# TODO: Currently only single GPU is supported
tf.config.set_visible_devices(self.gpu_list[self.device_idx] , "GPU" )
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/gpu:{self.device_idx}""" )
else:
tf.config.set_visible_devices([] , "GPU" ) # disable GPU
__lowerCAmelCase = tf.distribute.OneDeviceStrategy(device=f"""/cpu:{self.device_idx}""" )
return strategy
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return self._setup_tpu is not None
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return self._setup_strategy
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
return tf.config.list_physical_devices("GPU" )
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
requires_backends(self , ["tf"] )
if self.cuda:
return len(self.gpu_list )
return 0
@property
def __SCREAMING_SNAKE_CASE( self ):
"""simple docstring"""
return self.n_gpu > 0
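# Hedged usage sketch (public class name assumed to be TensorFlowBenchmarkArguments,
# which is not shown in this excerpt):
#   args = TensorFlowBenchmarkArguments(models=["bert-base-uncased"], no_cuda=True)
# __init__ flips the deprecated `no_cuda` kwarg into `cuda=False` and logs a warning
# suggesting the positive form.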
| 92
| 0
|
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self, lowercase_ = "", lowercase_ = None, lowercase_ = None, **lowercase_ ) -> str:
super().__init__(self, **lowercase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
snake_case = fsspec.open(
lowercase_, mode='rb', protocol=lowercase_, compression=self.compression, client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs', {} ), # To avoid issues if it was already passed.
}, **(target_options or {}), )
snake_case = os.path.basename(self.file.path.split('::' )[0] )
snake_case = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
snake_case = None
@classmethod
def _lowerCamelCase ( cls, lowercase_ ) -> Any:
# compressed file paths are always relative to the archive root
return super()._strip_protocol(lowercase_ ).lstrip('/' )
def _lowerCamelCase ( self ) -> Optional[Any]:
if self.dir_cache is None:
snake_case = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
snake_case = {f['name']: f}
def _lowerCamelCase ( self, lowercase_ ) -> str:
return self.file.open().read()
def _lowerCamelCase ( self, lowercase_, lowercase_ = "rb", lowercase_=None, lowercase_=True, lowercase_=None, **lowercase_, ) -> Any:
snake_case = self._strip_protocol(lowercase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"

    def __init__(self, fo, mode="rb", target_protocol=None, target_options=None, block_size=DEFAULT_BLOCK_SIZE, **kwargs):
        super().__init__(
            fo=fo, mode=mode, target_protocol=target_protocol, target_options=target_options, block_size=block_size, **kwargs)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__

        class WrappedFile:
            def __init__(self, file_):
                self._file = file_

            def __enter__(self):
                self._file.__enter__()
                return self

            def __exit__(self, *args, **kwargs):
                self._file.__exit__(*args, **kwargs)

            def __iter__(self):
                return iter(self._file)

            def __next__(self):
                return next(self._file)

            def __getattr__(self, attr):
                return getattr(self._file, attr)

        def fixed_enter(*args, **kwargs):
            return WrappedFile(_enter(*args, **kwargs))

        self.file.__enter__ = fixed_enter
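        # Hedged usage sketch (fsspec URL chaining, mirroring the protocol comment in
        # BaseCompressedFileFileSystem; the path is illustrative):
        #   import fsspec
        #   with fsspec.open("gzip://file.txt::file:///tmp/file.txt.gz", "rt") as f:
        #       text = f.read()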
| 332
|
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
lowerCAmelCase_ = {
"vocab_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
},
"merges_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
},
"tokenizer_file": {
"allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
},
}
lowerCAmelCase_ = {
"allenai/led-base-16384": 1_6_3_8_4,
}
class lowerCamelCase ( __lowerCAmelCase ):
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = LEDTokenizer
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self, lowercase_=None, lowercase_=None, lowercase_=None, lowercase_="replace", lowercase_="<s>", lowercase_="</s>", lowercase_="</s>", lowercase_="<s>", lowercase_="<unk>", lowercase_="<pad>", lowercase_="<mask>", lowercase_=False, lowercase_=True, **lowercase_, ) -> int:
super().__init__(
lowercase_, lowercase_, tokenizer_file=lowercase_, errors=lowercase_, bos_token=lowercase_, eos_token=lowercase_, sep_token=lowercase_, cls_token=lowercase_, unk_token=lowercase_, pad_token=lowercase_, mask_token=lowercase_, add_prefix_space=lowercase_, trim_offsets=lowercase_, **lowercase_, )
snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = getattr(lowercase_, pre_tok_state.pop('type' ) )
snake_case = add_prefix_space
snake_case = pre_tok_class(**lowercase_ )
snake_case = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
snake_case = 'post_processor'
snake_case = getattr(self.backend_tokenizer, lowercase_, lowercase_ )
if tokenizer_component_instance:
snake_case = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
snake_case = tuple(state['sep'] )
if "cls" in state:
snake_case = tuple(state['cls'] )
snake_case = False
if state.get('add_prefix_space', lowercase_ ) != add_prefix_space:
snake_case = add_prefix_space
snake_case = True
if state.get('trim_offsets', lowercase_ ) != trim_offsets:
snake_case = trim_offsets
snake_case = True
if changes_to_apply:
snake_case = getattr(lowercase_, state.pop('type' ) )
snake_case = component_class(**lowercase_ )
setattr(self.backend_tokenizer, lowercase_, lowercase_ )
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.')
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value
    def _batch_encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args, **kwargs )
    def _encode_plus( self, *args, **kwargs ) -> BatchEncoding:
        is_split_into_words = kwargs.get('is_split_into_words', False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                F'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args, **kwargs )
    def save_vocabulary( self, save_directory, filename_prefix = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix )
        return tuple(files )
    def build_inputs_with_special_tokens( self, token_ids_0, token_ids_1=None ) -> List[int]:
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]
    def create_token_type_ids_from_sequences( self, token_ids_0, token_ids_1 = None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def _pad( self, encoded_inputs, max_length = None, padding_strategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of = None, return_attention_mask = None, ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask'] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs['global_attention_mask'] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
        return encoded_inputs
| 332
| 1
|
def solution( limit : int = 1_000_000 ) -> int:
    # Sieve Euler's totient: phi[p] == p - 1 exactly when p is prime.
    phi = [i - 1 for i in range(limit + 1 )]
    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , i ):
                phi[j] -= phi[j] // i
    return sum(phi[2 : limit + 1] )
if __name__ == "__main__":
    print(solution())
| 336
|
'''simple docstring'''
def price_plus_tax( price : float , tax_rate : float ) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
    print(F'''{price_plus_tax(100, 0.25) = }''')
    print(F'''{price_plus_tax(125.50, 0.05) = }''')
| 250
| 0
|
edges = {"a": ["c", "b"], "b": ["d", "e"], "c": [], "d": [], "e": []}
vertices = ["a", "b", "c", "d", "e"]
def topological_sort(start , visited , sort ):
    current = start
    # add current to visited
    visited.append(current )
    neighbors = edges[current]
    for neighbor in neighbors:
        # if neighbor not in visited, visit
        if neighbor not in visited:
            sort = topological_sort(neighbor , visited , sort )
    # if all neighbors visited add current to sort
    sort.append(current )
    # if all vertices haven't been visited select a new one to visit
    if len(visited ) != len(vertices ):
        for vertice in vertices:
            if vertice not in visited:
                sort = topological_sort(vertice , visited , sort )
    # return sort
    return sort
if __name__ == "__main__":
    sort = topological_sort("a", [], [])
    print(sort)
| 359
|
def gnome_sort(lst : list ):
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
| 171
| 0
|
def circle_sort(collection ):
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection , low , high ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
    UpperCAmelCase = input('''Enter numbers separated by a comma:\n''').strip()
    UpperCAmelCase = [int(item) for item in UpperCAmelCase.split(''',''')]
    print(circle_sort(UpperCAmelCase))
| 195
|
import os
from datetime import datetime as dt
from github import Github
UpperCAmelCase = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main():
    g = Github(os.environ['GITHUB_TOKEN'] )
    repo = g.get_repo('huggingface/diffusers' )
    open_issues = repo.get_issues(state='open' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i : i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='closed' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='open' )
issue.remove_from_labels('stale' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
issue.add_to_labels('stale' )
if __name__ == "__main__":
main()
| 195
| 1
|
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
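# The helpers below remap timm-style PoolFormer checkpoint keys onto the Hugging Face module layout.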
def replace_key_with_offset(key , offset , original_name , new_name ) -> str:
    to_find = original_name.split(""".""")[0]
    key_list = key.split(""".""")
    orig_block_num = int(key_list[key_list.index(to_find) - 2])
    layer_num = int(key_list[key_list.index(to_find) - 1])
    new_block_num = orig_block_num - offset
    key = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''')
    return key
def rename_keys(state_dict ) -> OrderedDict:
    new_state_dict = OrderedDict()
    total_embed_found, patch_emb_offset = 0, 0
    for key, value in state_dict.items():
        if key.startswith("""network"""):
            key = key.replace("""network""" , """poolformer.encoder""")
        if "proj" in key:
            # Works for the first embedding as well as the internal embedding layers
            if key.endswith("""bias""") and "patch_embed" not in key:
                patch_emb_offset += 1
            to_replace = key[: key.find("""proj""")]
            key = key.replace(to_replace , F'''patch_embeddings.{total_embed_found}.''')
            key = key.replace("""proj""" , """projection""")
            if key.endswith("""bias"""):
                total_embed_found += 1
        if "patch_embeddings" in key:
            key = """poolformer.encoder.""" + key
        if "mlp.fc1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """mlp.fc1""" , """output.conv1""")
        if "mlp.fc2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """mlp.fc2""" , """output.conv2""")
        if "norm1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """norm1""" , """before_norm""")
        if "norm2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """norm2""" , """after_norm""")
        if "layer_scale_1" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """layer_scale_1""" , """layer_scale_1""")
        if "layer_scale_2" in key:
            key = replace_key_with_offset(key , patch_emb_offset , """layer_scale_2""" , """layer_scale_2""")
        if "head" in key:
            key = key.replace("""head""" , """classifier""")
        new_state_dict[key] = value
    return new_state_dict
def prepare_img() -> Image.Image:
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True).raw)
    return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name , checkpoint_path , pytorch_dump_folder_path ) -> None:
    config = PoolFormerConfig()
    # set attributes based on model_name
    repo_id = """huggingface/label-files"""
    size = model_name[-3:]
    config.num_labels = 1000
    filename = """imagenet-1k-id2label.json"""
    expected_shape = (1, 1000)
    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""") , """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "s12":
        config.depths = [2, 2, 6, 2]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s24":
        config.depths = [4, 4, 12, 4]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        crop_pct = 0.9
    elif size == "s36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [64, 128, 320, 512]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.9
    elif size == "m36":
        config.depths = [6, 6, 18, 6]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    elif size == "m48":
        config.depths = [8, 8, 24, 8]
        config.hidden_sizes = [96, 192, 384, 768]
        config.mlp_ratio = 4.0
        config.layer_scale_init_value = 1e-6
        crop_pct = 0.95
    else:
        raise ValueError(F'''Size {size} not supported''')
    # load image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    # Prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image , return_tensors="""pt""").pixel_values
    logger.info(F'''Converting model {model_name}...''')
    # load original state dict
    state_dict = torch.load(checkpoint_path , map_location=torch.device("""cpu"""))
    # rename keys
    state_dict = rename_keys(state_dict)
    # create HuggingFace model and load state dict
    model = PoolFormerForImageClassification(config)
    model.load_state_dict(state_dict)
    model.eval()
    # Define image processor
    image_processor = PoolFormerImageProcessor(crop_pct=crop_pct)
    pixel_values = image_processor(images=prepare_img() , return_tensors="""pt""").pixel_values
    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
    # define expected logit slices for different models
    if size == "s12":
        expected_slice = torch.tensor([-0.3045, -0.6758, -0.4869])
    elif size == "s24":
        expected_slice = torch.tensor([0.4402, -0.1374, -0.8045])
    elif size == "s36":
        expected_slice = torch.tensor([-0.6080, -0.5133, -0.5898])
    elif size == "m36":
        expected_slice = torch.tensor([0.3952, 0.2263, -1.2668])
    elif size == "m48":
        expected_slice = torch.tensor([0.1167, -0.0656, -0.3423])
    else:
        raise ValueError(F'''Size {size} not supported''')
    # verify logits
    assert logits.shape == expected_shape
    assert torch.allclose(logits[0, :3] , expected_slice , atol=1e-2)
    # finally, save model and image processor
    logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 368
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
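# One test class per UNet block variant; each checks a fixed output slice via the shared tester mixin.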
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
| 293
| 0
|
from math import factorial
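# Forward-mode automatic differentiation with dual numbers: the dual components collect Taylor
# coefficients, so the n-th derivative at a point is duals[n - 1] * n!.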
class Dual:
    """simple docstring"""
    def __init__(self , real , rank ):
        """simple docstring"""
        self.real = real
        if isinstance(rank , int ):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self ):
        """simple docstring"""
        return (
            F'''{self.real}+'''
            F'''{'+'.join(str(dual )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
        )
    def reduce(self ):
        """simple docstring"""
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1 )
        return Dual(self.real , cur )
    def __add__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            return Dual(self.real + other , self.duals )
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual ) > len(o_dual ):
            o_dual.extend([1] * (len(s_dual ) - len(o_dual )) )
        elif len(s_dual ) < len(o_dual ):
            s_dual.extend([1] * (len(o_dual ) - len(s_dual )) )
        new_duals = []
        for i in range(len(s_dual ) ):
            new_duals.append(s_dual[i] + o_dual[i] )
        return Dual(self.real + other.real , new_duals )
    __radd__ = __add__
    def __sub__(self , other ):
        """simple docstring"""
        return self + other * -1
    def __mul__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other )
            return Dual(self.real * other , new_duals )
        new_duals = [0] * (len(self.duals ) + len(other.duals ) + 1)
        for i, item in enumerate(self.duals ):
            for j, jtem in enumerate(other.duals ):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals ) ):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals ) ):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real , new_duals )
    __rmul__ = __mul__
    def __truediv__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other )
            return Dual(self.real / other , new_duals )
        raise ValueError
    def __floordiv__(self , other ):
        """simple docstring"""
        if not isinstance(other , Dual ):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other )
            return Dual(self.real // other , new_duals )
        raise ValueError
    def __pow__(self , n ):
        """simple docstring"""
        if n < 0 or isinstance(n , float ):
            raise ValueError("power must be a positive integer" )
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1 ):
            x *= self
        return x
def differentiate(func , position , order ):
    """simple docstring"""
    if not callable(func ):
        raise ValueError("differentiate() requires a function as input for func" )
    if not isinstance(position , (float, int) ):
        raise ValueError("differentiate() requires a float as input for position" )
    if not isinstance(order , int ):
        raise ValueError("differentiate() requires an int as input for order" )
    d = Dual(position , 1 )
    result = func(d )
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    def f(y ):
        """simple docstring"""
        return y**2 * y**4
    print(differentiate(f, 9, 2))
| 227
|
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
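# Fast tests for StableDiffusionDiffEditPipeline: mask generation, latent inversion, and the full edit loop.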
class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = StableDiffusionDiffEditPipeline
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'}
lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'}
lowercase = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowercase = frozenset([] )
def _lowercase( self ) -> Optional[int]:
torch.manual_seed(0 )
UpperCAmelCase : Dict = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , )
UpperCAmelCase : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , )
UpperCAmelCase : List[Any] = DDIMInverseScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , )
torch.manual_seed(0 )
UpperCAmelCase : int = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
UpperCAmelCase : Optional[Any] = CLIPTextModel(A )
UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""inverse_scheduler""": inverse_scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def _lowercase( self , A , A=0 ) -> Optional[Any]:
UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : List[Any] = torch.manual_seed(A )
else:
UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : int = {
"""prompt""": """a dog and a newt""",
"""mask_image""": mask,
"""image_latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> Optional[int]:
UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : Any = {
"""image""": image,
"""source_prompt""": """a cat and a frog""",
"""target_prompt""": """a dog and a newt""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""num_maps_per_mask""": 2,
"""mask_encode_strength""": 1.0,
"""guidance_scale""": 6.0,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self , A , A=0 ) -> str:
UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A )
UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" )
if str(A ).startswith("""mps""" ):
UpperCAmelCase : Optional[int] = torch.manual_seed(A )
else:
UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A )
UpperCAmelCase : str = {
"""image""": image,
"""prompt""": """a cat and a frog""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""inpaint_strength""": 1.0,
"""guidance_scale""": 6.0,
"""decode_latents""": True,
"""output_type""": """numpy""",
}
return inputs
def _lowercase( self ) -> List[Any]:
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
UpperCAmelCase : Dict = self.get_dummy_components()
UpperCAmelCase : int = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(A , A , A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase : Any = self.get_dummy_inputs(A )
UpperCAmelCase : Optional[Any] = pipe(**A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(A )
UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A )
pipe_loaded.to(A )
pipe_loaded.set_progress_bar_config(disable=A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , )
UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A )
UpperCAmelCase : Tuple = pipe_loaded(**A )[0]
UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max()
self.assertLess(A , 1e-4 )
def _lowercase( self ) -> Optional[int]:
UpperCAmelCase : Optional[int] = """cpu"""
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A )
UpperCAmelCase : List[Any] = pipe.generate_mask(**A )
UpperCAmelCase : Dict = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase : Optional[int] = np.array([0] * 9 )
UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = """cpu"""
UpperCAmelCase : List[str] = self.get_dummy_components()
UpperCAmelCase : Optional[Any] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : List[str] = pipe.invert(**A ).images
UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Dict = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
def _lowercase( self ) -> Dict:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def _lowercase( self ) -> int:
UpperCAmelCase : List[Any] = """cpu"""
UpperCAmelCase : int = self.get_dummy_components()
UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""}
UpperCAmelCase : int = DPMSolverMultistepScheduler(**A )
UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A )
UpperCAmelCase : List[str] = self.pipeline_class(**A )
pipe.to(A )
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A )
UpperCAmelCase : Any = pipe.invert(**A ).images
UpperCAmelCase : Dict = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Any = np.array(
[0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , )
UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(A , 1e-3 )
@require_torch_gpu
@slow
class UpperCamelCase_ ( unittest.TestCase ):
def _lowercase( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def _lowercase( cls ) -> Dict:
UpperCAmelCase : Tuple = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) )
UpperCAmelCase : List[str] = raw_image
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : Tuple = """a bowl of fruit"""
UpperCAmelCase : List[Any] = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Tuple = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents
UpperCAmelCase : Any = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : List[str] = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
def _lowercase( self ) -> Optional[Any]:
UpperCAmelCase : Optional[Any] = torch.manual_seed(0 )
UpperCAmelCase : Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa )
UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=A )
UpperCAmelCase : int = """a bowl of fruit"""
UpperCAmelCase : int = """a bowl of pears"""
UpperCAmelCase : str = pipe.generate_mask(
image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , )
UpperCAmelCase : Any = pipe.invert(
prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents
UpperCAmelCase : str = pipe(
prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 265
| 0
|
def is_ip_va_address_valid(ip_va_address: str ) -> bool:
    octets = [int(i ) for i in ip_va_address.split('.' ) if i.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 254 for octet in octets )
if __name__ == "__main__":
    ip = input().strip()
    valid_or_invalid = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
    print(F'''{ip} is a {valid_or_invalid} IP v4 address.''')
| 88
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
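# The heavy torch modeling classes are only registered below when torch is importable.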
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 88
| 1
|
"""simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
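# Read-only fsspec filesystem that exposes a single compressed file as a one-entry archive.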
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Any = ""
a__ : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a__ : str = None # compression type in fsspec. ex: "gzip"
a__ : str = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , _lowercase : str = "" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , **_lowercase : Optional[int] ):
super().__init__(self , **_lowercase )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
__UpperCAmelCase = fsspec.open(
_lowercase , mode='''rb''' , protocol=_lowercase , compression=self.compression , client_kwargs={
'''requote_redirect_url''': False, # see https://github.com/huggingface/datasets/pull/5459
'''trust_env''': True, # Enable reading proxy env variables.
**(target_options or {}).pop('''client_kwargs''' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
__UpperCAmelCase = os.path.basename(self.file.path.split('''::''' )[0] )
__UpperCAmelCase = (
self.compressed_name[: self.compressed_name.rindex('''.''' )]
if '''.''' in self.compressed_name
else self.compressed_name
)
__UpperCAmelCase = None
@classmethod
def a ( cls : Optional[Any] , _lowercase : Tuple ):
# compressed file paths are always relative to the archive root
return super()._strip_protocol(_lowercase ).lstrip('''/''' )
def a ( self : int ):
if self.dir_cache is None:
__UpperCAmelCase = {**self.file.fs.info(self.file.path ), '''name''': self.uncompressed_name}
__UpperCAmelCase = {f['''name''']: f}
def a ( self : List[Any] , _lowercase : str ):
return self.file.open().read()
def a ( self : Optional[int] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Any=None , _lowercase : Optional[Any]=True , _lowercase : Dict=None , **_lowercase : Optional[int] , ):
__UpperCAmelCase = self._strip_protocol(_lowercase )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Union[str, Any] = "bz2"
a__ : str = "bz2"
a__ : str = ".bz2"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[str] = "gzip"
a__ : List[Any] = "gzip"
a__ : int = ".gz"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : List[Any] = "lz4"
a__ : Optional[Any] = "lz4"
a__ : int = ".lz4"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "xz"
a__ : Dict = "xz"
a__ : Any = ".xz"
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Any = "zstd"
a__ : Optional[int] = "zstd"
a__ : int = ".zst"
def __init__( self : List[Any] , _lowercase : str , _lowercase : str = "rb" , _lowercase : Optional[str] = None , _lowercase : Optional[dict] = None , _lowercase : int = DEFAULT_BLOCK_SIZE , **_lowercase : Dict , ):
super().__init__(
fo=_lowercase , mode=_lowercase , target_protocol=_lowercase , target_options=_lowercase , block_size=_lowercase , **_lowercase , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
__UpperCAmelCase = self.file.__enter__
class _UpperCAmelCase :
def __init__( self : Union[str, Any] , _lowercase : Any ):
__UpperCAmelCase = file_
def __enter__( self : str ):
self._file.__enter__()
return self
def __exit__( self : Union[str, Any] , *_lowercase : List[Any] , **_lowercase : Any ):
self._file.__exit__(*_lowercase , **_lowercase )
def __iter__( self : str ):
return iter(self._file )
def a ( self : Optional[int] ):
return next(self._file )
def __getattr__( self : Optional[int] , _lowercase : Tuple ):
return getattr(self._file , _lowercase )
def fixed_enter(*_lowercase : Any , **_lowercase : int ):
return WrappedFile(_enter(*_lowercase , **_lowercase ) )
__UpperCAmelCase = fixed_enter
| 332
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : str = logging.get_logger(__name__)
_lowercase : Dict = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "swinv2"
a__ : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any , _lowercase : List[Any]=2_24 , _lowercase : int=4 , _lowercase : Optional[int]=3 , _lowercase : Optional[Any]=96 , _lowercase : Optional[int]=[2, 2, 6, 2] , _lowercase : Optional[int]=[3, 6, 12, 24] , _lowercase : str=7 , _lowercase : Union[str, Any]=4.0 , _lowercase : List[str]=True , _lowercase : List[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : List[Any]=0.1 , _lowercase : Union[str, Any]="gelu" , _lowercase : Tuple=False , _lowercase : Optional[int]=0.02 , _lowercase : List[Any]=1E-5 , _lowercase : Tuple=32 , **_lowercase : Optional[int] , ):
super().__init__(**_lowercase )
__UpperCAmelCase = image_size
__UpperCAmelCase = patch_size
__UpperCAmelCase = num_channels
__UpperCAmelCase = embed_dim
__UpperCAmelCase = depths
__UpperCAmelCase = len(_lowercase )
__UpperCAmelCase = num_heads
__UpperCAmelCase = window_size
__UpperCAmelCase = mlp_ratio
__UpperCAmelCase = qkv_bias
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = drop_path_rate
__UpperCAmelCase = hidden_act
__UpperCAmelCase = use_absolute_embeddings
__UpperCAmelCase = layer_norm_eps
__UpperCAmelCase = initializer_range
__UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__UpperCAmelCase = int(embed_dim * 2 ** (len(_lowercase ) - 1) )
__UpperCAmelCase = (0, 0, 0, 0)
| 332
| 1
|
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase : Optional[int] = logging.get_logger(__name__)
lowerCamelCase : Dict = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
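# ALIGN couples a BERT-style text encoder config with an EfficientNet-style vision encoder config.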
class _a ( _lowercase):
_a : Union[str, Any] = '''align_text_model'''
def __init__( self : Any , _SCREAMING_SNAKE_CASE : Tuple=3_0522 , _SCREAMING_SNAKE_CASE : Dict=768 , _SCREAMING_SNAKE_CASE : List[str]=12 , _SCREAMING_SNAKE_CASE : Optional[int]=12 , _SCREAMING_SNAKE_CASE : Union[str, Any]=3072 , _SCREAMING_SNAKE_CASE : Optional[Any]="gelu" , _SCREAMING_SNAKE_CASE : Tuple=0.1 , _SCREAMING_SNAKE_CASE : Optional[Any]=0.1 , _SCREAMING_SNAKE_CASE : List[Any]=512 , _SCREAMING_SNAKE_CASE : Optional[int]=2 , _SCREAMING_SNAKE_CASE : List[str]=0.02 , _SCREAMING_SNAKE_CASE : Union[str, Any]=1E-12 , _SCREAMING_SNAKE_CASE : Any=0 , _SCREAMING_SNAKE_CASE : List[str]="absolute" , _SCREAMING_SNAKE_CASE : List[Any]=True , **_SCREAMING_SNAKE_CASE : str , )-> Optional[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : int = hidden_size
lowerCAmelCase__ : List[Any] = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : str = intermediate_size
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = max_position_embeddings
lowerCAmelCase__ : Dict = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = initializer_range
lowerCAmelCase__ : Dict = layer_norm_eps
lowerCAmelCase__ : Tuple = position_embedding_type
lowerCAmelCase__ : List[str] = use_cache
lowerCAmelCase__ : Dict = pad_token_id
@classmethod
def UpperCAmelCase__( cls : Any , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **_SCREAMING_SNAKE_CASE : Union[str, Any] )-> "PretrainedConfig":
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowerCAmelCase__ : List[Any] = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _a ( _lowercase):
_a : Any = '''align_vision_model'''
def __init__( self : Any , _SCREAMING_SNAKE_CASE : int = 3 , _SCREAMING_SNAKE_CASE : int = 600 , _SCREAMING_SNAKE_CASE : float = 2.0 , _SCREAMING_SNAKE_CASE : float = 3.1 , _SCREAMING_SNAKE_CASE : int = 8 , _SCREAMING_SNAKE_CASE : List[int] = [3, 3, 5, 3, 5, 5, 3] , _SCREAMING_SNAKE_CASE : List[int] = [32, 16, 24, 40, 80, 112, 192] , _SCREAMING_SNAKE_CASE : List[int] = [16, 24, 40, 80, 112, 192, 320] , _SCREAMING_SNAKE_CASE : List[int] = [] , _SCREAMING_SNAKE_CASE : List[int] = [1, 2, 2, 2, 1, 2, 1] , _SCREAMING_SNAKE_CASE : List[int] = [1, 2, 2, 3, 3, 4, 1] , _SCREAMING_SNAKE_CASE : List[int] = [1, 6, 6, 6, 6, 6, 6] , _SCREAMING_SNAKE_CASE : float = 0.25 , _SCREAMING_SNAKE_CASE : str = "swish" , _SCREAMING_SNAKE_CASE : int = 2560 , _SCREAMING_SNAKE_CASE : str = "mean" , _SCREAMING_SNAKE_CASE : float = 0.02 , _SCREAMING_SNAKE_CASE : float = 0.001 , _SCREAMING_SNAKE_CASE : float = 0.99 , _SCREAMING_SNAKE_CASE : float = 0.2 , **_SCREAMING_SNAKE_CASE : List[str] , )-> Optional[Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : int = image_size
lowerCAmelCase__ : List[str] = width_coefficient
lowerCAmelCase__ : Optional[Any] = depth_coefficient
lowerCAmelCase__ : Dict = depth_divisor
lowerCAmelCase__ : int = kernel_sizes
lowerCAmelCase__ : Any = in_channels
lowerCAmelCase__ : str = out_channels
lowerCAmelCase__ : Union[str, Any] = depthwise_padding
lowerCAmelCase__ : str = strides
lowerCAmelCase__ : Tuple = num_block_repeats
lowerCAmelCase__ : int = expand_ratios
lowerCAmelCase__ : Any = squeeze_expansion_ratio
lowerCAmelCase__ : List[str] = hidden_act
lowerCAmelCase__ : List[str] = hidden_dim
lowerCAmelCase__ : List[str] = pooling_type
lowerCAmelCase__ : Dict = initializer_range
lowerCAmelCase__ : Tuple = batch_norm_eps
lowerCAmelCase__ : Optional[int] = batch_norm_momentum
lowerCAmelCase__ : Any = drop_connect_rate
lowerCAmelCase__ : List[str] = sum(_SCREAMING_SNAKE_CASE ) * 4
@classmethod
def UpperCAmelCase__( cls : Any , _SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , **_SCREAMING_SNAKE_CASE : str )-> "PretrainedConfig":
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowerCAmelCase__ : Optional[int] = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class _a ( _lowercase):
_a : List[str] = '''align'''
_a : Tuple = True
def __init__( self : Any , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : str=640 , _SCREAMING_SNAKE_CASE : int=1.0 , _SCREAMING_SNAKE_CASE : int=0.02 , **_SCREAMING_SNAKE_CASE : int , )-> Union[str, Any]:
super().__init__(**_SCREAMING_SNAKE_CASE )
if text_config is None:
lowerCAmelCase__ : Optional[Any] = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
lowerCAmelCase__ : Union[str, Any] = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
lowerCAmelCase__ : int = AlignTextConfig(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = AlignVisionConfig(**_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = projection_dim
lowerCAmelCase__ : int = temperature_init_value
lowerCAmelCase__ : Union[str, Any] = initializer_range
@classmethod
def UpperCAmelCase__( cls : Tuple , _SCREAMING_SNAKE_CASE : AlignTextConfig , _SCREAMING_SNAKE_CASE : AlignVisionConfig , **_SCREAMING_SNAKE_CASE : str )-> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : int )-> Any:
lowerCAmelCase__ : str = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : Any = self.text_config.to_dict()
lowerCAmelCase__ : List[str] = self.vision_config.to_dict()
lowerCAmelCase__ : Tuple = self.__class__.model_type
return output
| 352
|
import math
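# All-pairs shortest paths (Floyd-Warshall) over a dense adjacency-matrix graph.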
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
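    # A worked relaxation check, assuming the edges above: the direct weight
    # dp[4][2] is 4, while the best detour 4 -> 1 -> 3 -> 2 costs 3 + 5 + 1 = 9,
    # so Floyd-Warshall keeps 4 as the shortest distance from 4 to 2.
    assert graph.show_min(4, 2) == 4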
| 211
| 0
|
import json
import sys
def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
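    # Sketch of the expected input shape (hypothetical benchmark/metric names):
    # {"benchmarks/array_ops": {"load_time": {"new": 1.23, "old": 1.50, "diff": -0.27}}}
    # Each metric carries a required "new" value plus optional "old" and "diff",
    # which the loop above renders as one "| metric |" table per benchmark file.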
| 65
|
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1

        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third

        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
else:
print("""Not found""")
| 171
| 0
|
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    for char in word:
        char = ord(char)
        if not _is_chinese_char(char):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start, max_word_len)
            for i in range(max_match, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""prepare_chinese_ref""")
parser.add_argument(
"""--file_name""",
type=str,
default="""./resources/chinese-demo.txt""",
help="""file need process, same as training data in lm""",
)
parser.add_argument(
"""--ltp""", type=str, default="""./resources/ltp""", help="""resources for LTP tokenizer, usually a path"""
)
parser.add_argument("""--bert""", type=str, default="""./resources/robert""", help="""resources for Bert tokenizer""")
parser.add_argument("""--save_path""", type=str, default="""./resources/ref.txt""", help="""path to save res""")
    args = parser.parse_args()
main(args)
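    # Shape of the saved refs, sketched on a hypothetical line "我爱北京": BERT
    # yields [CLS] 我 爱 北 京 [SEP], and if LTP segments 北京 as one word,
    # add_sub_symbol rewrites token 4 to ##京, so the saved ref line is [4],
    # i.e. the positions that whole-word masking must tie to their word head.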
| 367
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_instructblip""": [
"""INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InstructBlipConfig""",
"""InstructBlipQFormerConfig""",
"""InstructBlipVisionConfig""",
],
"""processing_instructblip""": ["""InstructBlipProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
"""INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InstructBlipQFormerModel""",
"""InstructBlipPreTrainedModel""",
"""InstructBlipForConditionalGeneration""",
"""InstructBlipVisionModel""",
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
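
# A brief sketch of what the lazy indirection above buys, assuming this file is
# `transformers/models/instructblip/__init__.py`: the torch-backed classes are
# imported only when first accessed, e.g.
#
#     from transformers.models.instructblip import InstructBlipProcessor
#
# resolves through `_LazyModule.__getattr__` rather than importing the heavy
# modeling code at package-import time.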
| 226
| 0
|
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetListTest(TestCase):
    def _create_example_records(self):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dict(self):
        data = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
        return Dataset.from_dict(data)

    def test_create(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        self.assertListEqual(dset.column_names, ["col_1", "col_2"])
        for i, r in enumerate(dset):
            self.assertDictEqual(r, example_records[i])

    def test_list_dict_equivalent(self):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records)
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]})
        self.assertEqual(dset.info, dset_from_dict.info)

    def test_uneven_records(self):  # checks what happens with missing columns
        uneven_records = [{"col_1": 1}, {"col_2": "x"}]
        dset = Dataset.from_list(uneven_records)
        self.assertDictEqual(dset[0], {"col_1": 1})
        self.assertDictEqual(dset[1], {"col_1": None})  # NB: first record is used for columns

    def test_variable_list_records(self):  # checks if the type can be inferred from the second record
        list_records = [{"col_1": []}, {"col_1": [1, 2]}]
        dset = Dataset.from_list(list_records)
        self.assertEqual(dset.info.features["col_1"], Sequence(Value("int64")))

    def test_create_empty(self):
        dset = Dataset.from_list([])
        self.assertEqual(len(dset), 0)
        self.assertListEqual(dset.column_names, [])
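

# A standalone sketch of the behaviour the tests above pin down:
if __name__ == "__main__":
    dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
    # The first record fixes the schema, so the second row back-fills col_1 with None.
    print(dset.column_names, dset[1])  # ['col_1'] {'col_1': None}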
| 99
|
"""simple docstring"""
def present_value(discount_rate: float, cash_flows: list) -> float:
    if discount_rate < 0:
        raise ValueError("Discount rate cannot be negative")
    if not cash_flows:
        raise ValueError("Cash flows list cannot be empty")
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)
if __name__ == "__main__":
import doctest
doctest.testmod()
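    # A worked example: an outlay of 1000 followed by inflows of 300, 400 and 500
    # at a 10% discount rate gives 300/1.1 + 400/1.21 + 500/1.331 - 1000,
    # roughly -21.04, so this hypothetical project destroys value.
    assert present_value(0.10, [-1000, 300, 400, 500]) == -21.04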
| 293
| 0
|
"""simple docstring"""
from statistics import mean
import numpy as np
def calculate_turn_around_time(
    process_name: list, arrival_time: list, burst_time: list, no_of_process: int
) -> list:
    current_time = 0
    # Number of processes finished
    finished_process_count = 0
    # Displays the finished process.
    # If it is 0, the performance is completed if it is 1, before the performance.
    finished_process = [0] * no_of_process
    # List to include calculation results
    turn_around_time = [0] * no_of_process

    # Sort by arrival time.
    burst_time = [burst_time[i] for i in np.argsort(arrival_time)]
    process_name = [process_name[i] for i in np.argsort(arrival_time)]
    arrival_time.sort()

    while no_of_process > finished_process_count:
        i = 0
        while finished_process[i] == 1:
            i += 1
        if current_time < arrival_time[i]:
            current_time = arrival_time[i]

        response_ratio = 0
        # Index showing the location of the process being performed
        loc = 0
        # Saves the current response ratio.
        temp = 0
        for i in range(0, no_of_process):
            if finished_process[i] == 0 and arrival_time[i] <= current_time:
                temp = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
                    i
                ]
            if response_ratio < temp:
                response_ratio = temp
                loc = i

        # Calculate the turn around time
        turn_around_time[loc] = current_time + burst_time[loc] - arrival_time[loc]
        current_time += burst_time[loc]
        # Indicates that the process has been performed.
        finished_process[loc] = 1
        # Increase finished_process_count by 1
        finished_process_count += 1

    return turn_around_time


def calculate_waiting_time(
    process_name: list, turn_around_time: list, burst_time: list, no_of_process: int
) -> list:
    waiting_time = [0] * no_of_process
    for i in range(0, no_of_process):
        waiting_time[i] = turn_around_time[i] - burst_time[i]
    return waiting_time
if __name__ == "__main__":
__A = 5
__A = ["A", "B", "C", "D", "E"]
__A = [1, 2, 3, 4, 5]
__A = [1, 2, 3, 4, 5]
__A = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__A = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
f'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(f'''average waiting time : {mean(waiting_time):.5f}''')
print(f'''average turn around time : {mean(turn_around_time):.5f}''')
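    # A worked response-ratio check against the data above: once A (burst 1)
    # finishes at t=2, process B (arrival 2, burst 2) has ratio (0 + 2) / 2 = 1.0
    # while C has not arrived yet, so HRRN picks the highest
    # (waiting + burst) / burst among the arrived, unfinished processes.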
| 363
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
_UpperCAmelCase :Optional[int] = StableDiffusionPanoramaPipeline
_UpperCAmelCase :List[str] = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase :str = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase :Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_UpperCAmelCase :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__: List[Any] = DDIMScheduler()
torch.manual_seed(0 )
lowercase__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase )
lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: int = torch.manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _UpperCAmelCase=0 ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
lowercase__: int = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
lowercase__: int = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
lowercase__: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__: int = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
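

# A minimal end-to-end sketch of the pipeline under test, assuming GPU access
# and the same public checkpoint the slow tests above use:
if __name__ == "__main__":
    model_ckpt = "stabilityai/stable-diffusion-2-base"
    scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder="scheduler")
    pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
    pipe = pipe.to("cuda")
    # The defaults produce the wide 512x2048 panorama the shape assertions expect.
    image = pipe(prompt="a photo of the dolomites").images[0]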
| 2
| 0
|
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_informer': [
'INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'InformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_informer'] = [
'INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'InformerForPrediction',
'InformerModel',
'InformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 88
|
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_fold_dataloaders(
    accelerator: Accelerator, dataset: DatasetDict, train_idxs: List[int], valid_idxs: List[int], batch_size: int = 16
):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = DatasetDict(
        {
            "train": dataset["train"].select(train_idxs),
            "validation": dataset["train"].select(valid_idxs),
            "test": dataset["validation"],
        }
    )

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )
    test_dataloader = DataLoader(
        tokenized_datasets["test"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader, test_dataloader
def training_function(config, args):
    # New Code #
    test_predictions = []
    # Download the dataset
    datasets = load_dataset("glue", "mrpc")
    # Create our splits
    kfold = StratifiedKFold(n_splits=int(args.num_folds))
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)

    # New Code #
    # Create our folds:
    folds = kfold.split(np.zeros(datasets["train"].num_rows), datasets["train"]["label"])
    test_references = []
    # Iterate over them
    for i, (train_idxs, valid_idxs) in enumerate(folds):
        train_dataloader, eval_dataloader, test_dataloader = get_fold_dataloaders(
            accelerator,
            datasets,
            train_idxs,
            valid_idxs,
        )
        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                loss = loss / gradient_accumulation_steps
                accelerator.backward(loss)
                if step % gradient_accumulation_steps == 0:
                    optimizer.step()
                    lr_scheduler.step()
                    optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # We also run predictions on the test set at the very end
        fold_predictions = []
        for step, batch in enumerate(test_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            fold_predictions.append(predictions.cpu())
            if i == 0:
                # We need all of the test predictions
                test_references.append(references.cpu())

        # Use accelerator.print to print only on the main process.
        test_predictions.append(torch.cat(fold_predictions, dim=0))
    # We now need to release all our memory and get rid of the current model, optimizer, etc
    accelerator.free_memory()
    # New Code #
    # Finally we check the accuracy of our folded results:
    test_references = torch.cat(test_references, dim=0)
    preds = torch.stack(test_predictions, dim=0).sum(dim=0).div(int(args.num_folds)).argmax(dim=-1)
    test_metric = metric.compute(predictions=preds, references=test_references)
    accelerator.print("Average test metrics from all folds:", test_metric)
accelerator.print("""Average test metrics from all folds:""", A_ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    # New Code #
    parser.add_argument("--num_folds", type=int, default=3, help="The number of splits to perform across the dataset")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
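    # The script is typically launched through the Accelerate CLI so the same
    # code covers single-GPU, multi-GPU and TPU runs, e.g. (the file name here
    # is an assumption; use whatever this script is saved as):
    #   accelerate launch cross_validation.py --num_folds 3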
| 88
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"microsoft/swinv2-tiny-patch4-window8-256": (
"https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json"
),
}
class Swinv2Config(PretrainedConfig):
    model_type = "swinv2"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
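

# A quick check of the derived channel width, using the defaults above: with
# embed_dim=96 and depths=[2, 2, 6, 2], the last stage has 96 * 2 ** 3 = 768
# channels, which is what the `hidden_size` attribute reports.
if __name__ == "__main__":
    assert Swinv2Config().hidden_size == 768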
| 293
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            ))
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 293
| 1
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
    '{processor_class}': 'FakeProcessorClass',
    '{model_class}': 'FakeModelClass',
    '{object_class}': 'FakeObjectClass',
}
| 205
|
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = 'audio'
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column='audio', label_column='label')


AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
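
# A minimal loading sketch, assuming a local `my_audio/` directory laid out as
# one sub-folder per class label (the layout AudioFolder infers labels from):
#
#     from datasets import load_dataset
#     dataset = load_dataset("audiofolder", data_dir="my_audio")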
| 211
| 0
|
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
"""simple docstring"""
def A_ ( self : str ) -> Dict:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__ : Optional[Any] = mock.Mock()
SCREAMING_SNAKE_CASE__ : int = 5_0_0
SCREAMING_SNAKE_CASE__ : List[str] = {}
SCREAMING_SNAKE_CASE__ : Tuple = HTTPError
SCREAMING_SNAKE_CASE__ : Any = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=_UpperCAmelCase ) as mock_head:
SCREAMING_SNAKE_CASE__ : Optional[int] = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def A_ ( self : Union[str, Any] ) -> str:
"""simple docstring"""
# A mock response for an HTTP head request to emulate server down
SCREAMING_SNAKE_CASE__ : List[str] = mock.Mock()
SCREAMING_SNAKE_CASE__ : Any = 5_0_0
SCREAMING_SNAKE_CASE__ : int = {}
SCREAMING_SNAKE_CASE__ : Optional[Any] = HTTPError
SCREAMING_SNAKE_CASE__ : Dict = {}
# Download this model to make sure it's in the cache.
SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizerFast.from_pretrained("gpt2" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request", return_value=_UpperCAmelCase ) as mock_head:
SCREAMING_SNAKE_CASE__ : Tuple = GPTaTokenizerFast.from_pretrained("gpt2" )
# This check we did call the fake head request
mock_head.assert_called()
def A_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
try:
SCREAMING_SNAKE_CASE__ : Dict = tempfile.mktemp()
with open(_UpperCAmelCase, "wb" ) as f:
http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = AlbertTokenizer.from_pretrained(_UpperCAmelCase )
finally:
os.remove(_UpperCAmelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("tokenizer.json" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("tokenizer.json", "wb" ) as f:
http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size, 1_0_0_0 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("tokenizer.json" )
def A_ ( self : str ) -> Optional[Any]:
"""simple docstring"""
# This test is for deprecated behavior and can be removed in v5
SCREAMING_SNAKE_CASE__ : List[str] = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model" )
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
"""simple docstring"""
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def A_ ( cls : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = TOKEN
HfFolder.save_token(_UpperCAmelCase )
@classmethod
def A_ ( cls : List[str] ) -> int:
"""simple docstring"""
try:
delete_repo(token=cls._token, repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
def A_ ( self : Any ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(_UpperCAmelCase, "vocab.txt" )
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Tuple = BertTokenizer(_UpperCAmelCase )
tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="test-tokenizer" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(_UpperCAmelCase, repo_id="test-tokenizer", push_to_hub=_UpperCAmelCase, use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : str = BertTokenizer.from_pretrained(F'''{USER}/test-tokenizer''' )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
def A_ ( self : List[Any] ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(_UpperCAmelCase, "vocab.txt" )
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : str = BertTokenizer(_UpperCAmelCase )
tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : int = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
# Reset repo
delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
_UpperCAmelCase, repo_id="valid_org/test-tokenizer-org", push_to_hub=_UpperCAmelCase, use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Dict = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org" )
self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab )
@require_tokenizers
def A_ ( self : int ) -> Tuple:
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(_UpperCAmelCase, "vocab.txt" )
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Dict = CustomTokenizer(_UpperCAmelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
SCREAMING_SNAKE_CASE__ : int = os.path.join(_UpperCAmelCase, "vocab.txt" )
with open(_UpperCAmelCase, "w", encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens] ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = BertTokenizerFast.from_pretrained(_UpperCAmelCase )
bert_tokenizer.save_pretrained(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomTokenizerFast.from_pretrained(_UpperCAmelCase )
tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained(F'''{USER}/test-dynamic-tokenizer''', trust_remote_code=_UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast" )
SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained(
F'''{USER}/test-dynamic-tokenizer''', use_fast=_UpperCAmelCase, trust_remote_code=_UpperCAmelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer" )
class TrieTest(unittest.TestCase):
"""simple docstring"""
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = Trie()
trie.add("Hello 友達" )
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}} )
trie.add("Hello" )
trie.data
self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}} )
def A_ ( self : Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = Trie()
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS] This is a extra_id_100"] )
trie.add("[CLS]" )
trie.add("extra_id_1" )
trie.add("extra_id_100" )
self.assertEqual(trie.split("[CLS] This is a extra_id_100" ), ["[CLS]", " This is a ", "extra_id_100"] )
def A_ ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = Trie()
trie.add("A" )
self.assertEqual(trie.split("ABC" ), ["A", "BC"] )
self.assertEqual(trie.split("BCA" ), ["BC", "A"] )
def A_ ( self : Any ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = Trie()
trie.add("TOKEN]" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def A_ ( self : List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = Trie()
trie.add("A" )
trie.add("P" )
trie.add("[SPECIAL_TOKEN]" )
self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]" ), ["This is something ", "[SPECIAL_TOKEN]"] )
def A_ ( self : Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = Trie()
trie.add("AB" )
trie.add("B" )
trie.add("C" )
self.assertEqual(trie.split("ABC" ), ["AB", "C"] )
def A_ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = Trie()
trie.add("ABC" )
trie.add("B" )
trie.add("CD" )
self.assertEqual(trie.split("ABCD" ), ["ABC", "D"] )
def A_ ( self : Optional[int] ) -> Dict:
"""simple docstring"""
# Even if the offsets are wrong, we necessarily output correct string
# parts.
SCREAMING_SNAKE_CASE__ : List[Any] = Trie()
SCREAMING_SNAKE_CASE__ : Optional[Any] = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3] )
self.assertEqual(_UpperCAmelCase, ["AB", "C"] )
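

# A standalone sketch of the tokenizer Trie the last test class exercises:
if __name__ == "__main__":
    trie = Trie()
    trie.add("[CLS]")
    trie.add("extra_id_1")
    # Longest-match splitting keeps added special tokens intact:
    print(trie.split("[CLS] This is a extra_id_1"))  # ['[CLS]', ' This is a ', 'extra_id_1']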
| 191
|
import json
import os
from typing import Optional
import numpy as np
from ...feature_extraction_utils import BatchFeature
from ...processing_utils import ProcessorMixin
from ...utils import logging
from ...utils.hub import get_file_from_repo
from ..auto import AutoTokenizer
logger = logging.get_logger(__name__)
class BarkProcessor(ProcessorMixin):
    tokenizer_class = "AutoTokenizer"
    attributes = ["tokenizer"]

    preset_shape = {
        "semantic_prompt": 1,
        "coarse_prompt": 2,
        "fine_prompt": 2,
    }

    def __init__(self, tokenizer, speaker_embeddings=None):
        super().__init__(tokenizer)
        self.speaker_embeddings = speaker_embeddings
@classmethod
def A_ ( cls : Any, _UpperCAmelCase : List[str], _UpperCAmelCase : Dict="speaker_embeddings_path.json", **_UpperCAmelCase : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if speaker_embeddings_dict_path is not None:
SCREAMING_SNAKE_CASE__ : Any = get_file_from_repo(
_UpperCAmelCase, _UpperCAmelCase, subfolder=kwargs.pop("subfolder", _UpperCAmelCase ), cache_dir=kwargs.pop("cache_dir", _UpperCAmelCase ), force_download=kwargs.pop("force_download", _UpperCAmelCase ), proxies=kwargs.pop("proxies", _UpperCAmelCase ), resume_download=kwargs.pop("resume_download", _UpperCAmelCase ), local_files_only=kwargs.pop("local_files_only", _UpperCAmelCase ), use_auth_token=kwargs.pop("use_auth_token", _UpperCAmelCase ), revision=kwargs.pop("revision", _UpperCAmelCase ), )
if speaker_embeddings_path is None:
logger.warning(
F'''`{os.path.join(_UpperCAmelCase, _UpperCAmelCase )}` does not exists
, no preloaded speaker embeddings will be used - Make sure to provide a correct path to the json
dictionnary if wanted, otherwise set `speaker_embeddings_dict_path=None`.''' )
SCREAMING_SNAKE_CASE__ : Dict = None
else:
with open(_UpperCAmelCase ) as speaker_embeddings_json:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = json.load(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[str] = AutoTokenizer.from_pretrained(_UpperCAmelCase, **_UpperCAmelCase )
return cls(tokenizer=_UpperCAmelCase, speaker_embeddings=_UpperCAmelCase )
def A_ ( self : str, _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[str]="speaker_embeddings_path.json", _UpperCAmelCase : Optional[Any]="speaker_embeddings", _UpperCAmelCase : bool = False, **_UpperCAmelCase : List[str], ) -> Union[str, Any]:
"""simple docstring"""
if self.speaker_embeddings is not None:
os.makedirs(os.path.join(_UpperCAmelCase, _UpperCAmelCase, "v2" ), exist_ok=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
SCREAMING_SNAKE_CASE__ : Any = save_directory
for prompt_key in self.speaker_embeddings:
if prompt_key != "repo_or_path":
SCREAMING_SNAKE_CASE__ : List[Any] = self._load_voice_preset(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = {}
for key in self.speaker_embeddings[prompt_key]:
np.save(
os.path.join(
embeddings_dict["repo_or_path"], _UpperCAmelCase, F'''{prompt_key}_{key}''' ), voice_preset[key], allow_pickle=_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(_UpperCAmelCase, F'''{prompt_key}_{key}.npy''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = tmp_dict
with open(os.path.join(_UpperCAmelCase, _UpperCAmelCase ), "w" ) as fp:
json.dump(_UpperCAmelCase, _UpperCAmelCase )
super().save_pretrained(_UpperCAmelCase, _UpperCAmelCase, **_UpperCAmelCase )
def A_ ( self : List[Any], _UpperCAmelCase : str = None, **_UpperCAmelCase : List[Any] ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.speaker_embeddings[voice_preset]
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset_paths:
raise ValueError(
F'''Voice preset unrecognized, missing {key} as a key in self.speaker_embeddings[{voice_preset}].''' )
SCREAMING_SNAKE_CASE__ : List[Any] = get_file_from_repo(
self.speaker_embeddings.get("repo_or_path", "/" ), voice_preset_paths[key], subfolder=kwargs.pop("subfolder", _UpperCAmelCase ), cache_dir=kwargs.pop("cache_dir", _UpperCAmelCase ), force_download=kwargs.pop("force_download", _UpperCAmelCase ), proxies=kwargs.pop("proxies", _UpperCAmelCase ), resume_download=kwargs.pop("resume_download", _UpperCAmelCase ), local_files_only=kwargs.pop("local_files_only", _UpperCAmelCase ), use_auth_token=kwargs.pop("use_auth_token", _UpperCAmelCase ), revision=kwargs.pop("revision", _UpperCAmelCase ), )
if path is None:
raise ValueError(
F'''`{os.path.join(self.speaker_embeddings.get("repo_or_path", "/" ), voice_preset_paths[key] )}` does not exists
, no preloaded voice preset will be used - Make sure to provide correct paths to the {voice_preset}
embeddings.''' )
SCREAMING_SNAKE_CASE__ : int = np.load(_UpperCAmelCase )
return voice_preset_dict
def A_ ( self : int, _UpperCAmelCase : Optional[dict] = None ) -> Optional[int]:
"""simple docstring"""
for key in ["semantic_prompt", "coarse_prompt", "fine_prompt"]:
if key not in voice_preset:
raise ValueError(F'''Voice preset unrecognized, missing {key} as a key.''' )
if not isinstance(voice_preset[key], np.ndarray ):
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
if len(voice_preset[key].shape ) != self.preset_shape[key]:
raise ValueError(F'''{key} voice preset must be a {str(self.preset_shape[key] )}D ndarray.''' )
def __call__( self : List[Any], _UpperCAmelCase : Optional[Any]=None, _UpperCAmelCase : Union[str, Any]=None, _UpperCAmelCase : Optional[int]="pt", _UpperCAmelCase : List[str]=2_5_6, _UpperCAmelCase : int=False, _UpperCAmelCase : Optional[int]=True, _UpperCAmelCase : Any=False, **_UpperCAmelCase : List[str], ) -> List[Any]:
"""simple docstring"""
if voice_preset is not None and not isinstance(_UpperCAmelCase, _UpperCAmelCase ):
if (
isinstance(_UpperCAmelCase, _UpperCAmelCase )
and self.speaker_embeddings is not None
and voice_preset in self.speaker_embeddings
):
SCREAMING_SNAKE_CASE__ : List[str] = self._load_voice_preset(_UpperCAmelCase )
else:
if isinstance(_UpperCAmelCase, _UpperCAmelCase ) and not voice_preset.endswith(".npz" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = voice_preset + ".npz"
SCREAMING_SNAKE_CASE__ : List[str] = np.load(_UpperCAmelCase )
if voice_preset is not None:
self._validate_voice_preset_dict(_UpperCAmelCase, **_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any = BatchFeature(data=_UpperCAmelCase, tensor_type=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer(
_UpperCAmelCase, return_tensors=_UpperCAmelCase, padding="max_length", max_length=_UpperCAmelCase, return_attention_mask=_UpperCAmelCase, return_token_type_ids=_UpperCAmelCase, add_special_tokens=_UpperCAmelCase, **_UpperCAmelCase, )
if voice_preset is not None:
SCREAMING_SNAKE_CASE__ : str = voice_preset
return encoded_text
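
# A hypothetical usage sketch (the checkpoint and voice preset names are assumptions):
#
#   processor = BarkProcessor.from_pretrained("suno/bark-small")
#   inputs = processor("Hello, my dog is cute", voice_preset="v2/en_speaker_6")
#   # `inputs` holds the tokenized text plus a "history_prompt" BatchFeature.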
| 191
| 1
|
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolves a polar force into rectangular (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Checks rotational equilibrium: the sum of moments (r x F) must be ~0."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps


if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )
    location: NDArray[float64] = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])
    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
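    # An extra sanity check (added): two equal and opposite forces applied at
    # the same point produce no net moment, so the system is in equilibrium.
    forces = array([[1, 0], [-1, 0]])
    location = array([[0, 0], [0, 0]])
    assert in_static_equilibrium(forces, location)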
| 264
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
__UpperCAmelCase : Any = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
__UpperCAmelCase : Tuple = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], 
[6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , a_ )
self.assertListEqual(encoding.boxes , a_ )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
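    # A hypothetical usage sketch outside the test suite (the image path is an
    # assumption):
    #
    #   processor = LayoutLMv3ImageProcessor(apply_ocr=False)  # skip the Tesseract pass
    #   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
    #   encoding.pixel_values.shape  # torch.Size([1, 3, 224, 224]) with the default size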
| 226
| 0
|
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        """PyTorch dataset for the HANS task, with on-disk feature caching."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        """TensorFlow dataset for the HANS task."""

        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i):
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples


def hans_convert_examples_to_features(examples, label_list, max_length, tokenizer):
    """Converts a list of ``InputExample`` into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0

        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
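
# A minimal sanity-check sketch for the processor above (added; needs no data files):
if __name__ == "__main__":
    processor = HansProcessor()
    assert processor.get_labels() == ["contradiction", "entailment", "neutral"]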
| 2
|
"""simple docstring"""
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
"--original_config_file",
default=None,
type=str,
help="The YAML config file corresponding to the original architecture.",
)
parser.add_argument(
"--num_in_channels",
default=None,
type=int,
help="The number of input channels. If `None` number of input channels will be automatically inferred.",
)
parser.add_argument(
"--scheduler_type",
default="pndm",
type=str,
help="Type of scheduler to use. Should be one of ['pndm', 'lms', 'ddim', 'euler', 'euler-ancestral', 'dpm']",
)
parser.add_argument(
"--pipeline_type",
default=None,
type=str,
help=(
"The pipeline type. One of 'FrozenOpenCLIPEmbedder', 'FrozenCLIPEmbedder', 'PaintByExample'"
". If `None` pipeline will be automatically inferred."
),
)
parser.add_argument(
"--image_size",
default=None,
type=int,
help=(
"The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
" Base. Use 768 for Stable Diffusion v2."
),
)
parser.add_argument(
"--prediction_type",
default=None,
type=str,
help=(
"The prediction type that the model was trained on. Use 'epsilon' for Stable Diffusion v1.X and Stable"
" Diffusion v2 Base. Use 'v_prediction' for Stable Diffusion v2."
),
)
parser.add_argument(
"--extract_ema",
action="store_true",
help=(
"Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
" or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
" higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
),
)
parser.add_argument(
"--upcast_attention",
action="store_true",
help=(
"Whether the attention computation should always be upcasted. This is necessary when running stable"
" diffusion 2.1."
),
)
parser.add_argument(
"--from_safetensors",
action="store_true",
help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
)
parser.add_argument(
"--to_safetensors",
action="store_true",
help="Whether to store pipeline in safetensors format or not.",
)
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
parser.add_argument(
"--stable_unclip",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP model. One of 'txt2img' or 'img2img'.",
)
parser.add_argument(
"--stable_unclip_prior",
type=str,
default=None,
required=False,
help="Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.",
)
parser.add_argument(
"--clip_stats_path",
type=str,
help="Path to the clip stats file. Only required if the stable unclip model's config specifies `model.params.noise_aug_config.params.clip_stats_path`.",
required=False,
)
parser.add_argument(
"--controlnet", action="store_true", default=None, help="Set flag if this is a controlnet checkpoint."
)
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--vae_path",
type=str,
default=None,
required=False,
help="Set to a path, hub id to an already converted vae to not convert it again.",
)
    args = parser.parse_args()
    pipe = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
        pipe.to(torch_dtype=torch.float16)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
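    # Example invocation (the script and file names below are placeholders):
    #
    #   python convert_original_stable_diffusion_to_diffusers.py \
    #       --checkpoint_path ./v1-5-pruned-emaonly.ckpt \
    #       --dump_path ./stable-diffusion-v1-5 \
    #       --scheduler_type pndm --extract_ema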
| 2
| 1
|
"""simple docstring"""
def solution() -> int:
    """Returns the number of Sundays that fell on the first of the month during
    the twentieth century (1 Jan 1901 to 31 Dec 2000)."""
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

    day = 6
    month = 1
    year = 1901

    sundays = 0

    while year < 2001:
        day += 7

        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]

        if month > 12:
            year += 1
            month = 1

        if year < 2001 and day == 1:
            sundays += 1
    return sundays
if __name__ == "__main__":
print(solution())
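    # Cross-check with the standard library (added; not part of the original
    # solution): date.weekday() == 6 denotes Sunday.
    from datetime import date

    assert solution() == sum(
        1 for y in range(1901, 2001) for m in range(1, 13) if date(y, m, 1).weekday() == 6
    )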
| 102
|
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """Configuration class to store the configuration of a Modal-Text Transformer (MMBT)."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 2
| 0
|
"""simple docstring"""
import unittest
from knapsack import knapsack as k
class TestClass(unittest.TestCase):
    """Test cases for the 0/1 knapsack implementation."""

    def test_base_case(self):
        """Zero capacity (and zero-value items) yields zero."""
        cap = 0
        val = [0]
        w = [0]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

        val = [60]
        w = [10]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 0)

    def test_easy_case(self):
        """A small instance with an exact optimum of 5."""
        cap = 3
        val = [1, 2, 3]
        w = [3, 2, 1]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 5)

    def test_knapsack(self):
        """The classic textbook instance with an optimum of 220."""
        cap = 50
        val = [60, 100, 120]
        w = [10, 20, 30]
        c = len(val)
        self.assertEqual(k.knapsack(cap, w, val, c), 220)
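
# A compact bottom-up rewrite of the same 0/1 knapsack recurrence, added as a
# cross-check sketch (the implementation under test is `knapsack.knapsack`):
def knapsack_reference(capacity: int, weights: list, values: list, counter: int) -> int:
    dp = [[0] * (capacity + 1) for _ in range(counter + 1)]
    for i in range(1, counter + 1):
        for w in range(capacity + 1):
            dp[i][w] = dp[i - 1][w]  # skip item i
            if weights[i - 1] <= w:  # or take it, if it fits
                dp[i][w] = max(dp[i][w], dp[i - 1][w - weights[i - 1]] + values[i - 1])
    return dp[counter][capacity]


assert knapsack_reference(50, [10, 20, 30], [60, 100, 120], 3) == 220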
if __name__ == "__main__":
unittest.main()
| 241
|
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_mgp_str""": ["""MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MgpstrConfig"""],
"""processing_mgp_str""": ["""MgpstrProcessor"""],
"""tokenization_mgp_str""": ["""MgpstrTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mgp_str"] = [
"""MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MgpstrModel""",
"""MgpstrPreTrainedModel""",
"""MgpstrForSceneTextRecognition""",
]
if TYPE_CHECKING:
from .configuration_mgp_str import MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP, MgpstrConfig
from .processing_mgp_str import MgpstrProcessor
from .tokenization_mgp_str import MgpstrTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mgp_str import (
MGP_STR_PRETRAINED_MODEL_ARCHIVE_LIST,
MgpstrForSceneTextRecognition,
MgpstrModel,
MgpstrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 241
| 1
|
"""simple docstring"""
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    """Builds and simulates the quantum Fourier transform circuit on
    `number_of_qubits` qubits, returning the measured counts."""
    if not isinstance(number_of_qubits, int):
        raise TypeError("number of qubits must be an integer.")
    if number_of_qubits <= 0:
        raise ValueError("number of qubits must be > 0.")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("number of qubits must be exact integer.")
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate(>10).")

    qr = QuantumRegister(number_of_qubits, "qr")
    cr = ClassicalRegister(number_of_qubits, "cr")

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("qasm_simulator")
    job = execute(quantum_circuit, backend, shots=10000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
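    # Note (added): on the all-zeros input state the QFT yields a uniform
    # superposition, so the 10000 shots split roughly evenly across all
    # 2 ** number_of_qubits basis states (~1250 per outcome for 3 qubits).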
| 293
|
"""simple docstring"""
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    """Saves the model, clearing any stale config/weights files first."""
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Computes the entropy of a probability distribution."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
def print_2d_tensor(tensor):
    """Logs a 2D tensor row by row."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Computes head attention entropy and head importance scores following
    Michel et al. (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Masks heads iteratively (least important first) until the score drops
    below args.masking_threshold of the original score."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    """Actually removes the masked heads and compares score and speed before
    and after pruning."""
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time

    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }

    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]

    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())

    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time

    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain the .tsv files (or other data files) for the task.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the model predictions and checkpoints will be written.",
    )

    # Other parameters
    parser.add_argument(
        "--config_name",
        default="",
        type=str,
        help="Pretrained config name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--tokenizer_name",
        default="",
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name_or_path",
    )
    parser.add_argument(
        "--cache_dir",
        default=None,
        type=str,
        help="Where do you want to store the pre-trained models downloaded from s3",
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance",
        action="store_true",
        help="Don't normalize all importance scores between 0 and 1",
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask head until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold",
        default=0.9,
        type=float,
        help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).",
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend

    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))

    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)

    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)

    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)

    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)

    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)

    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
main()
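    # Example invocation (script name and paths are placeholders):
    #
    #   python run_prune_gpt.py --model_name_or_path gpt2 \
    #       --data_dir ./token_ids.txt --output_dir ./pruned_gpt2 \
    #       --try_masking --masking_threshold 0.9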
| 293
| 1
|
import torch
from diffusers import DiffusionPipeline
class CustomPipeline(DiffusionPipeline):
    """A minimal custom pipeline that runs a single denoising step."""

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # scheduler_output - scheduler_output == 0, so the result is a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
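
# A hypothetical usage sketch (the concrete UNet/scheduler classes are assumptions):
#
#   from diffusers import UNet2DModel, DDPMScheduler
#   pipe = CustomPipeline(unet=UNet2DModel(sample_size=32), scheduler=DDPMScheduler())
#   ones = pipe()  # a tensor of ones with the sample's shape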
| 245
|
def perfect(number: int) -> bool:
    """Returns True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
    number = int(input("Enter number: ").strip())
print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
| 245
| 1
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
    _backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "torchsde"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "torchsde"])
| 191
|
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
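    # Quick sanity checks (added): ties return every modal value, sorted.
    assert mode([2, 3, 4, 5, 3, 4, 2]) == [2, 3, 4]
    assert mode([]) == []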
| 191
| 1
|
"""simple docstring"""
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument(
"--txt2img_unclip",
default="kakaobrain/karlo-v1-alpha",
type=str,
required=False,
help="The pretrained txt2img unclip.",
)
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
| 358
|
"""simple docstring"""
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """
        Initialize with a list of the count of items in each set
        and with rank = 1 for each set.
        """
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))

    def merge(self, src: int, dst: int) -> bool:
        """Merge two sets together using union by rank heuristic; return True if merged."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)

        if src_parent == dst_parent:
            return False

        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]

        self.max_set = max(self.max_set, joined_set_size)
        return True

    def get_parent(self, disj_set: int) -> int:
        """Find the parent of a given set, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])  # path compression
        return self.parents[disj_set]
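if __name__ == "__main__":
    # Usage sketch (inputs invented for illustration): three sets of sizes 1, 2 and 3.
    ds = DisjointSet([1, 2, 3])
    assert ds.max_set == 3  # largest initial set
    ds.merge(0, 1)  # the merged set has size 1 + 2 = 3
    assert ds.max_set == 3
    ds.merge(1, 2)  # everything merged: size 6
    assert ds.max_set == 6
    assert ds.get_parent(0) == ds.get_parent(2)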
| 321
| 0
|
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train", tokenizer.__class__.__name__, str(max_seq_length), task
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
) -> List[InputFeatures]:
    """Loads a data file into a list of ``InputFeatures``."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
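if __name__ == "__main__":
    # Usage sketch (paths and checkpoint are placeholders; requires a local HANS
    # checkout containing heuristics_train_set.txt):
    from transformers import BertTokenizer

    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
    dataset = HansDataset(
        data_dir="./hans",
        tokenizer=tokenizer,
        task="hans",
        max_seq_length=128,
    )
    print(len(dataset), dataset.get_labels())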
| 2
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_rag': ['RagConfig'],
'retrieval_rag': ['RagRetriever'],
'tokenization_rag': ['RagTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rag"] = [
'RagModel',
'RagPreTrainedModel',
'RagSequenceForGeneration',
'RagTokenForGeneration',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rag"] = [
'TFRagModel',
'TFRagPreTrainedModel',
'TFRagSequenceForGeneration',
'TFRagTokenForGeneration',
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
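# The _LazyModule pattern above defers heavy imports until first attribute access.
# A simplified standalone sketch of the same idea (illustrative, not the actual
# transformers implementation):
import importlib
import types


class SimpleLazyModule(types.ModuleType):
    """Load submodules only when one of their attributes is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported name to the submodule that defines it.
        self._name_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._name_to_module:
            raise AttributeError(f"module {self.__name__} has no attribute {attr}")
        submodule = importlib.import_module(f"{self.__name__}.{self._name_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value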
| 2
| 1
|
'''simple docstring'''
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)
class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser("add-new-model")
        add_new_model_parser.add_argument("--testing", action="store_true", help="If in testing mode.")
        add_new_model_parser.add_argument("--testing_file", type=str, help="Configuration file on which to run.")
        add_new_model_parser.add_argument(
            "--path", type=str, help="Path to cookiecutter. Should only be used for testing purposes."
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path
    def run(self):
        warnings.warn(
            "The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. "
            "It is not actively maintained anymore, so might give a result that won't pass all tests and quality "
            "checks, you should use `transformers-cli add-new-model-like` instead."
        )
        if not _has_cookiecutter:
            raise ImportError(
                "Model creation dependencies are required to use the `add_new_model` command. Install them by running "
                "the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n"
            )
        # Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if "cookiecutter-template-" == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                "Several directories starting with `cookiecutter-template-` in current working directory. "
                "Please clean your directory by removing all folders starting with `cookiecutter-template-` or "
                "change your working directory."
            )
        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / "templates" / "adding_a_new_model"

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, "r") as configuration_file:
                testing_configuration = json.load(configuration_file)

            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if "cookiecutter-template-" in directory[:22]][0]

        # Retrieve configuration
        with open(directory + "/configuration.json", "r") as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration["lowercase_modelname"]
        generate_tensorflow_pytorch_and_flax = configuration["generate_tensorflow_pytorch_and_flax"]
        os.remove(f"{directory}/configuration.json")

        output_pytorch = "PyTorch" in generate_tensorflow_pytorch_and_flax
        output_tensorflow = "TensorFlow" in generate_tensorflow_pytorch_and_flax
        output_flax = "Flax" in generate_tensorflow_pytorch_and_flax

        model_dir = f"{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}"
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}", exist_ok=True)

        # Tests require submodules as they have parent imports
        with open(f"{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py", "w"):
            pass

        shutil.move(
            f"{directory}/__init__.py",
            f"{model_dir}/__init__.py",
        )
        shutil.move(
            f"{directory}/configuration_{lowercase_model_name}.py",
            f"{model_dir}/configuration_{lowercase_model_name}.py",
        )
        def remove_copy_lines(path):
            with open(path, "r") as f:
                lines = f.readlines()
            with open(path, "w") as f:
                for line in lines:
                    if "# Copied from transformers." not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' , F'{model_dir}/modeling_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' , F'{model_dir}/modeling_tf_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' , F'{model_dir}/modeling_flax_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' , F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' , )
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' , F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' , )
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}.py' , )
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' , F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' , )
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, "w") as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f"Line {line_to_copy_below} was not found in file.")

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line):
            return (
                ("generating PyTorch" in line and not output_pytorch)
                or ("generating TensorFlow" in line and not output_tensorflow)
                or ("generating Flax" in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)

                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)

            remove(path_to_datafile)
        replace_in_files(f"{directory}/to_replace_{lowercase_model_name}.py")
        os.rmdir(directory)
| 367
|
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        relative_attention=False, position_biased_input=True, pos_att_type="None", num_labels=3, num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, relative_attention=self.relative_attention, position_biased_input=self.position_biased_input, pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]

        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}")
| 147
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 241
|
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)


class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file",
            default=None,
            help="The accelerate config file to use for the default values in the launching script.",
        )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
| 241
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase_ = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 332
|
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("path", ["paws", "csv"])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning")
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
@pytest.mark.parametrize("path", ["accuracy"])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)


@pytest.mark.parametrize(
    "path, config_name, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)


@pytest.mark.parametrize(
    "path, expected",
    [
        ("squad", "plain_text"),
        ("acronym_identification", "default"),
        ("lhoestq/squad", "plain_text"),
        ("lhoestq/test", "default"),
        ("lhoestq/demo1", "lhoestq--demo1"),
        ("dalle-mini/wit", "dalle-mini--wit"),
    ],
)
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names


@pytest.mark.parametrize(
    "path, expected_configs, expected_splits_in_first_config",
    [
        ("squad", ["plain_text"], ["train", "validation"]),
        ("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
        ("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
    ],
)
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config


@pytest.mark.parametrize(
    "path, expected_config, expected_splits",
    [
        ("squad", "plain_text", ["train", "validation"]),
        ("dalle-mini/wit", "dalle-mini--wit", ["train"]),
        ("paws", "labeled_final", ["train", "test", "validation"]),
    ],
)
def test_get_dataset_infos_no_config_name(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits


@pytest.mark.parametrize(
    "path, config_name, expected_exception",
    [
        ("paws", None, ValueError),
    ],
)
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
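# The same helpers are handy outside tests; a quick sketch wrapped in a function so
# nothing runs at import time (requires network access to the Hugging Face Hub, and
# the function name / dataset are illustrative):
def example_inspect_squad():
    config_names = get_dataset_config_names("squad")
    splits = get_dataset_split_names("squad", config_names[0])
    return config_names, splits  # e.g. (['plain_text'], ['train', 'validation'])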
| 332
| 1
|
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
in_colab = False

try:
    in_colab = _is_package_available("google.colab")
except ModuleNotFoundError:
    pass
@input.register
class BulletMenu:
    """A CLI menu to select a choice from a list of choices using the keyboard and return the index."""

    def __init__(self, prompt: str = None, choices: list = []):
        self.position = 0
        self.choices = choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = "*"
        else:
            self.arrow_char = "➔ "
    def write_choice(self, index, end: str = ""):
        if sys.platform != "win32":
            writeColor(self.choices[index], 32, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        "Prints the choice at the given index"
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()
    def move_direction(self, direction: Direction, num_spaces: int = 1):
        "Should not be directly called, used to move a direction of either up or down"
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)
@input.mark(KEYMAP["""up"""] )
def _lowercase ( self : Optional[Any] ) ->List[Any]:
"""simple docstring"""
self.move_direction(Direction.UP )
@input.mark(KEYMAP["""down"""] )
def _lowercase ( self : Any ) ->str:
"""simple docstring"""
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["""newline"""] )
def _lowercase ( self : List[Any] ) ->Any:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
return self.position
@input.mark(KEYMAP["""interrupt"""] )
def _lowercase ( self : Any ) ->Optional[Any]:
"""simple docstring"""
move_cursor(len(self.choices ) - self.position , """DOWN""" )
raise KeyboardInterrupt
    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(10)])
    def select_row(self):
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return
    def run(self, default_choice: int = 0):
        "Start the menu and return the selected choice"
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, "\n")
            if in_colab:
                forceWrite("Please input a choice index (starting from 0), and press enter", "\n")
            else:
                forceWrite("Please select a choice using the arrow or number keys, and selecting with enter", "\n")
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite("\n")
        move_cursor(len(self.choices) - self.position, "UP")
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, "UP")
                        clear_line()
                    self.write_choice(choice, "\n")
                    return choice
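if __name__ == "__main__":
    # Usage sketch (run in a real terminal; the prompt and choices are illustrative):
    menu = BulletMenu("Which mixed precision mode?", ["no", "fp16", "bf16"])
    selected = menu.run(default_choice=0)  # blocks until the user confirms a choice
    print(f"Selected index: {selected} ({menu.choices[selected]})")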
| 245
|
import os
import pytest
from attr import dataclass
UpperCAmelCase__ : Optional[int] = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        return f"{self.framework}-transformers-test"
@property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""" )
def __lowercase ( _A ) -> Tuple:
SCREAMING_SNAKE_CASE : List[str] = SageMakerTestEnvironment(framework=request.cls.framework )
| 245
| 1
|
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 258
|
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
data = datasets.load_iris()

X = np.array(data["data"])
y = np.array(data["target"])
classes = data["target_names"]

X_train, X_test, y_train, y_test = train_test_split(X, y)


def euclidean_distance(a, b):
    """Calculate the Euclidean distance between two points."""
    return np.linalg.norm(np.array(a) - np.array(b))


def classifier(train_data, train_target, classes, point, k=5):
    """Classify `point` by a majority vote of its `k` nearest neighbours."""
    data = zip(train_data, train_target)
    # List of distances of all points from the point to be classified
    distances = []
    for data_point in data:
        distance = euclidean_distance(data_point[0], point)
        distances.append((distance, data_point[1]))
    # Choosing 'k' points with the least distances.
    votes = [i[1] for i in sorted(distances)[:k]]
    # Most commonly occurring class among them
    # is the class into which the point is classified
    result = Counter(votes).most_common(1)[0][0]
    return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4]))
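    # The same classifier on a tiny synthetic dataset, to make the voting step
    # concrete (data invented for illustration):
    toy_data = [[0.0, 0.0], [0.1, 0.2], [0.2, 0.1], [5.0, 5.0], [5.1, 4.9], [4.9, 5.2]]
    toy_target = [0, 0, 0, 1, 1, 1]
    toy_classes = ["a", "b"]
    print(classifier(toy_data, toy_target, toy_classes, [0.3, 0.1], k=3))  # 'a'
    print(classifier(toy_data, toy_target, toy_classes, [5.2, 5.1], k=3))  # 'b'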
| 258
| 1
|
'''simple docstring'''
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class TFGPTaTokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPTaTokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPTaTokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id)
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 22
|
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
SCREAMING_SNAKE_CASE__ = 2_5_6
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]
    def __init__(self, notes_encoder: SpectrogramNotesEncoder, continuous_encoder: SpectrogramContEncoder, decoder: TaFilmDecoder, scheduler: DDPMScheduler, melgan: OnnxRuntimeModel if is_onnx_available() else Any) -> None:
        super().__init__()
        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128
        self.register_modules(
            notes_encoder=notes_encoder, continuous_encoder=continuous_encoder, decoder=decoder, scheduler=scheduler, melgan=melgan)
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly rescale features to fit within `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
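    # Quick sanity check of the mapping above (values for illustration): with
    # min_value = log(1e-5) and max_value = 4.0, a feature equal to max_value
    # gives zero_one = 1.0 and lands on max_out, while one equal to min_value
    # gives zero_one = 0.0 and lands on min_out.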
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map network outputs back to feature space."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask)
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask)
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps)
        return logits
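    # Note on the timestep handling above: a scalar noise time (for example
    # t / num_train_timesteps) is first wrapped into a 1-element tensor, then
    # multiplied by ones(batch_size) so every sample in the batch sees the same
    # noise time explicitly, rather than relying on implicit broadcasting that
    # ONNX/Core ML export does not handle.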
@torch.no_grad()
    def __call__(self, input_tokens: List[List[int]], generator: Optional[torch.Generator] = None, num_inference_steps: int = 100, return_dict: bool = True, output_type: str = "numpy", callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                F" {type(callback_steps)}.")
        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype)
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones
            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True)
            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device), continuous_inputs=encoder_continuous_inputs, continuous_mask=encoder_continuous_mask)
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape, generator=generator, device=self.device, dtype=self.decoder.dtype)
            # set step values
            self.scheduler.set_timesteps(num_inference_steps)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks, input_tokens=x, noise_time=t / self.scheduler.config.num_train_timesteps)
                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample
            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()
            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)
            logger.info("Generated segment", i)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'.")
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'.")
        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=output)
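# Rough usage sketch (checkpoint name assumed for illustration, not verified
# here; `input_tokens` would come from a MIDI-to-token feature converter):
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     audio = pipe(input_tokens, num_inference_steps=100).audios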
| 321
| 0
|
'''simple docstring'''
def gnome_sort(lst: list) -> list:
    """Sort `lst` in place with gnome sort and return it."""
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
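# Example trace: gnome_sort([3, 1, 2]) compares (3, 1) -> swap -> [1, 3, 2],
# steps back, then (3, 2) -> swap -> [1, 2, 3]; like insertion sort, the
# worst-case cost is O(n^2).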
if __name__ == "__main__":
A_ = input("""Enter numbers separated by a comma:\n""").strip()
A_ = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 354
|
'''simple docstring'''
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve the direct video source behind an Instagram/IGTV url and return its bytes."""
    base_url = """https://downloadgram.net/wp-json/wppress/video-downloader/video?url="""
    video_url = requests.get(base_url + url).json()[0]["""urls"""][0]["""src"""]
    return requests.get(video_url).content
if __name__ == "__main__":
    url = input("""Enter Video/IGTV url: """).strip()
    file_name = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, """wb""") as fp:
        fp.write(download_video(url))
    print(f"""Done. Video saved to disk as {file_name}.""")
| 349
| 0
|
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_lowerCamelCase : str = ["""gpt2"""]
_lowerCamelCase : str = """gpt2"""
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPTaLMHeadModel.from_config(config)
        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
@require_tf
@require_keras_nlp
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)
        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_tokenizer(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])
                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)
                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
@slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)
            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
@slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123_123
            for max_length in [3, 5, 1_024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)
                out_length = out["input_ids"].numpy().shape[1]
                assert out_length == max_length
| 14
|
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize("""repo_id""" , ["""canonical_dataset_name""", """org-name/dataset-name"""] )
@pytest.mark.parametrize("""path""" , ["""filename.csv""", """filename with blanks.csv"""] )
@pytest.mark.parametrize("""revision""" , [None, """v2"""] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == F'https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path)}'
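# Example of the URL shape being asserted (values from the parametrization
# above; `quote` percent-encodes the blanks):
#     hf_hub_url("org-name/dataset-name", "filename with blanks.csv", None)
#     -> "https://huggingface.co/datasets/org-name/dataset-name/resolve/main/filename%20with%20blanks.csv"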
| 147
| 0
|
"""simple docstring"""
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 363
|
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latent_params = TEXT_TO_IMAGE_IMAGE_PARAMS
def _snake_case ( self ):
torch.manual_seed(0 )
lowercase__: Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
lowercase__: List[Any] = DDIMScheduler()
torch.manual_seed(0 )
lowercase__: Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase__: Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
lowercase__: List[str] = CLIPTextModel(_UpperCAmelCase )
lowercase__: int = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase__: int = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase=0 ):
lowercase__: int = torch.manual_seed(_UpperCAmelCase )
lowercase__: List[Any] = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
# Setting height and width to None to prevent OOMs on CPU.
'''height''': None,
'''width''': None,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[str] = self.get_dummy_components()
lowercase__: Union[str, Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Any = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[str] = np.array([0.6_186, 0.5_374, 0.4_915, 0.4_135, 0.4_114, 0.4_563, 0.5_128, 0.4_977, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _snake_case ( self ):
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.2_5e-3 )
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: str = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: str = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = '''french fries'''
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
lowercase__: Optional[Any] = output.images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: Optional[int] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: Union[str, Any] = self.get_dummy_components()
lowercase__: Optional[Any] = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: str = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Union[str, Any] = sd_pipe(**_UpperCAmelCase , view_batch_size=2 )
lowercase__: List[str] = output.images
lowercase__: List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_187, 0.5_375, 0.4_915, 0.4_136, 0.4_114, 0.4_563, 0.5_128, 0.4_976, 0.4_757] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: int = self.get_dummy_components()
lowercase__: List[str] = EulerAncestralDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
lowercase__: Any = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: Any = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: int = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.4_024, 0.6_510, 0.4_901, 0.5_378, 0.5_813, 0.5_622, 0.4_795, 0.4_467, 0.4_952] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase__: List[Any] = self.get_dummy_components()
lowercase__: Any = PNDMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , skip_prk_steps=_UpperCAmelCase )
lowercase__: Dict = StableDiffusionPanoramaPipeline(**_UpperCAmelCase )
lowercase__: int = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowercase__: Optional[int] = self.get_dummy_inputs(_UpperCAmelCase )
lowercase__: Dict = sd_pipe(**_UpperCAmelCase ).images
lowercase__: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
lowercase__: List[Any] = np.array([0.6_391, 0.6_291, 0.4_861, 0.5_134, 0.5_552, 0.4_578, 0.5_032, 0.5_023, 0.4_539] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def _snake_case ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self , _UpperCAmelCase=0 ):
lowercase__: Union[str, Any] = torch.manual_seed(_UpperCAmelCase )
lowercase__: int = {
'''prompt''': '''a photo of the dolomites''',
'''generator''': generator,
'''num_inference_steps''': 3,
'''guidance_scale''': 7.5,
'''output_type''': '''numpy''',
}
return inputs
def _snake_case ( self ):
lowercase__: Any = '''stabilityai/stable-diffusion-2-base'''
lowercase__: str = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Dict = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
lowercase__: Optional[Any] = pipe(**_UpperCAmelCase ).images
lowercase__: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
0.36_968_392,
0.27_025_372,
0.32_446_766,
0.28_379_387,
0.36_363_274,
0.30_733_347,
0.27_100_027,
0.27_054_125,
0.25_536_096,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _snake_case ( self ):
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_UpperCAmelCase )
lowercase__: Tuple = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: List[str] = self.get_inputs()
lowercase__: Dict = pipe(**_UpperCAmelCase ).images
lowercase__: Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 2048, 3)
lowercase__: List[Any] = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _snake_case ( self ):
lowercase__: int = 0
def callback_fn(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) -> None:
lowercase__: List[str] = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase__: Dict = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Any = latents[0, -3:, -3:, -1]
lowercase__: List[Any] = np.array(
[
0.18_681_869,
0.33_907_816,
0.5_361_276,
0.14_432_865,
-0.02_856_611,
-0.73_941_123,
0.23_397_987,
0.47_322_682,
-0.37_823_164,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase__: Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 256)
lowercase__: Optional[Any] = latents[0, -3:, -3:, -1]
lowercase__: Any = np.array(
[
0.18_539_645,
0.33_987_248,
0.5_378_559,
0.14_437_142,
-0.02_455_261,
-0.7_338_317,
0.23_990_755,
0.47_356_272,
-0.3_786_505,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase__: int = False
lowercase__: str = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Union[str, Any] = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: Tuple = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: Optional[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
lowercase__: Tuple = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _snake_case ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase__: List[Any] = '''stabilityai/stable-diffusion-2-base'''
lowercase__: Any = DDIMScheduler.from_pretrained(_UpperCAmelCase , subfolder='''scheduler''' )
lowercase__: int = StableDiffusionPanoramaPipeline.from_pretrained(_UpperCAmelCase , scheduler=_UpperCAmelCase , safety_checker=_UpperCAmelCase )
lowercase__: List[Any] = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase__: Any = self.get_inputs()
lowercase__: List[str] = pipe(**_UpperCAmelCase )
lowercase__: Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 10**9
| 2
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'configuration_xlnet': ['XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLNetConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet'] = ['XLNetTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_xlnet_fast'] = ['XLNetTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_xlnet'] = [
'XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLNetForMultipleChoice',
'XLNetForQuestionAnswering',
'XLNetForQuestionAnsweringSimple',
'XLNetForSequenceClassification',
'XLNetForTokenClassification',
'XLNetLMHeadModel',
'XLNetModel',
'XLNetPreTrainedModel',
'load_tf_weights_in_xlnet',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_xlnet'] = [
'TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLNetForMultipleChoice',
'TFXLNetForQuestionAnsweringSimple',
'TFXLNetForSequenceClassification',
'TFXLNetForTokenClassification',
'TFXLNetLMHeadModel',
'TFXLNetMainLayer',
'TFXLNetModel',
'TFXLNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 332
|
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser('env')
    else:
        parser = argparse.ArgumentParser('Accelerate env command')
    parser.add_argument(
        '--config_file', default=None, help='The config file to use for the default values in the launching script.')
    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()
    accelerate_config = 'Not found'
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()
    info = {
        '`Accelerate` version': version,
        'Platform': platform.platform(),
        'Python version': platform.python_version(),
        'Numpy version': np.__version__,
        'PyTorch version (GPU?)': F'{pt_version} ({pt_cuda_available})',
        'PyTorch XPU available': str(pt_xpu_available),
        'PyTorch NPU available': str(pt_npu_available),
        'System RAM': F'{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB',
    }
    if pt_cuda_available:
        info['GPU type'] = torch.cuda.get_device_name()
    print('\nCopy-and-paste the text below in your GitHub issue\n')
    print('\n'.join([F'- {prop}: {val}' for prop, val in info.items()]))
    print('- `Accelerate` default config:' if args.config_file is None else '- `Accelerate` config passed:')
    accelerate_config_str = (
        '\n'.join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else F'\t{accelerate_config}'
    )
    print(accelerate_config_str)
    info['`Accelerate` configs'] = accelerate_config
    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
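# Typical (abridged, machine-dependent) output of `accelerate env` -- the
# values here are placeholders, not captured from a real run:
#     - `Accelerate` version: 0.21.0
#     - Platform: Linux-5.15.0-x86_64-with-glibc2.31
#     - Python version: 3.10.12
#     - PyTorch version (GPU?): 2.0.1 (True)
#     - System RAM: 62.78 GB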
if __name__ == "__main__":
raise SystemExit(main())
| 332
| 1
|
'''simple docstring'''
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]
def create_vector(end_point1: Point3d, end_point2: Point3d):
    """Vector pointing from end_point1 to end_point2."""
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)
def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d):
    """Cross product of two 3d vectors."""
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)
def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    """True if every component rounds to zero at the given accuracy."""
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)
def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """True if the three points lie on one straight line."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
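# Example: are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)) is True, since
# AB = (1, 1, 1) and AC = (2, 2, 2) are parallel and their cross product is
# the zero vector; nudging the third point off the line makes it False.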
| 274
|
'''simple docstring'''
def nor_gate(input_1: int, input_2: int) -> int:
    """NOR gate: outputs 1 only when both inputs are 0."""
    return int(input_1 == input_2 == 0)
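# NOR is functionally complete: for instance, NOT can be built from it, since
# nor_gate(x, x) == int(not x) for x in {0, 1}.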
def main() -> None:
    """Print the NOR gate truth table."""
print('Truth Table of NOR Gate:' )
print('| Input 1 | Input 2 | Output |' )
print(F'''| 0 | 0 | {nor_gate(0 , 0 )} |''' )
print(F'''| 0 | 1 | {nor_gate(0 , 1 )} |''' )
print(F'''| 1 | 0 | {nor_gate(1 , 0 )} |''' )
print(F'''| 1 | 1 | {nor_gate(1 , 1 )} |''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 274
| 1
|
'''simple docstring'''
from collections.abc import Sequence
def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate a polynomial given as coefficients in order of increasing power."""
    return sum(c * (x**i) for i, c in enumerate(poly))
def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's method."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
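# Worked check for the example below: poly = (0.0, 0.0, 5.0, 9.3, 7.0) encodes
# 5x^2 + 9.3x^3 + 7x^4, so at x = 10.0 both functions should return
# 5*100 + 9.3*1000 + 7*10000 = 500 + 9300 + 70000 = 79800.0. Horner folds this
# as ((((7)x + 9.3)x + 5)x + 0)x + 0, using one multiplication per coefficient
# instead of evaluating each power separately.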
if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
| 258
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
    'configuration_audio_spectrogram_transformer': [
        'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'ASTConfig',
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_audio_spectrogram_transformer'] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_audio_spectrogram_transformer'] = ['ASTFeatureExtractor']
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 258
| 1
|
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class NezhaModelTester:
"""simple docstring"""
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__=1_3 , lowerCAmelCase__=7 , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__=9_9 , lowerCAmelCase__=3_2 , lowerCAmelCase__=5 , lowerCAmelCase__=4 , lowerCAmelCase__=3_7 , lowerCAmelCase__="gelu" , lowerCAmelCase__=0.1 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=3_2 , lowerCAmelCase__=1_6 , lowerCAmelCase__=2 , lowerCAmelCase__=0.02 , lowerCAmelCase__=3 , lowerCAmelCase__=4 , lowerCAmelCase__=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : int = seq_length
SCREAMING_SNAKE_CASE_ : List[str] = is_training
SCREAMING_SNAKE_CASE_ : List[Any] = use_input_mask
SCREAMING_SNAKE_CASE_ : Any = use_token_type_ids
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Dict = vocab_size
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE_ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE_ : List[str] = hidden_act
SCREAMING_SNAKE_CASE_ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE_ : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE_ : int = type_sequence_label_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE_ : List[Any] = num_labels
SCREAMING_SNAKE_CASE_ : Any = num_choices
SCREAMING_SNAKE_CASE_ : int = scope
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE_ : Dict = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE_ : str = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE_ : Optional[int] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
SCREAMING_SNAKE_CASE_ : Tuple = None
SCREAMING_SNAKE_CASE_ : int = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_choices )
SCREAMING_SNAKE_CASE_ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_a , initializer_range=self.initializer_range , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
(
SCREAMING_SNAKE_CASE_
) : int = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : str = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
SCREAMING_SNAKE_CASE_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = NezhaModel(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(_a , attention_mask=_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = model(_a , token_type_ids=_a )
SCREAMING_SNAKE_CASE_ : List[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = NezhaModel(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : Tuple = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , encoder_attention_mask=_a , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(
_a , attention_mask=_a , token_type_ids=_a , encoder_hidden_states=_a , )
SCREAMING_SNAKE_CASE_ : Any = model(_a , attention_mask=_a , token_type_ids=_a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = NezhaForMaskedLM(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : str = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = NezhaForNextSentencePrediction(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = NezhaForPreTraining(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , next_sentence_label=_a , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = NezhaForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(
_a , attention_mask=_a , token_type_ids=_a , start_positions=_a , end_positions=_a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.num_labels
SCREAMING_SNAKE_CASE_ : List[str] = NezhaForSequenceClassification(_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : List[str] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE_ : Any = NezhaForTokenClassification(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(_a , attention_mask=_a , token_type_ids=_a , labels=_a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = self.num_choices
SCREAMING_SNAKE_CASE_ : Tuple = NezhaForMultipleChoice(config=_a )
model.to(_a )
model.eval()
SCREAMING_SNAKE_CASE_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
SCREAMING_SNAKE_CASE_ : Tuple = model(
_a , attention_mask=_a , token_type_ids=_a , labels=_a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.prepare_config_and_inputs()
(
SCREAMING_SNAKE_CASE_
) : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_ : List[Any] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class NezhaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": NezhaModel,
"fill-mask": NezhaForMaskedLM,
"question-answering": NezhaForQuestionAnswering,
"text-classification": NezhaForSequenceClassification,
"token-classification": NezhaForTokenClassification,
"zero-shot": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = super()._prepare_for_class(_a , _a , return_labels=_a )
if return_labels:
if model_class in get_values(_a ):
SCREAMING_SNAKE_CASE_ : Dict = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_a )
SCREAMING_SNAKE_CASE_ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_a )
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = NezhaModelTester(self )
SCREAMING_SNAKE_CASE_ : Optional[Any] = ConfigTester(self , config_class=_a , hidden_size=3_7 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
(
SCREAMING_SNAKE_CASE_
) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE_ : int = None
self.model_tester.create_and_check_model_as_decoder(
_a , _a , _a , _a , _a , _a , _a , _a , _a , )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : Dict = NezhaModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@slow
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : List[Any] = model_class(config=_a )
SCREAMING_SNAKE_CASE_ : Optional[int] = self._prepare_for_class(_a , _a )
SCREAMING_SNAKE_CASE_ : Tuple = torch.jit.trace(
_a , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_a , os.path.join(_a , 'bert.pt' ) )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.jit.load(os.path.join(_a , 'bert.pt' ) , map_location=_a )
loaded(inputs_dict['input_ids'].to(_a ) , inputs_dict['attention_mask'].to(_a ) )
@require_torch
class NezhaModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = NezhaModel.from_pretrained('sijunhe/nezha-cn-base' )
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : str = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(_a , attention_mask=_a )[0]
SCREAMING_SNAKE_CASE_ : Any = torch.Size((1, 6, 7_6_8) )
self.assertEqual(output.shape , _a )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[[0.0_685, 0.2_441, 0.1_102], [0.0_600, 0.1_906, 0.1_349], [0.0_221, 0.0_819, 0.0_586]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Optional[int] = NezhaForMaskedLM.from_pretrained('sijunhe/nezha-cn-base' )
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] )
SCREAMING_SNAKE_CASE_ : int = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Any = model(_a , attention_mask=_a )[0]
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size((1, 6, 2_1_1_2_8) )
self.assertEqual(output.shape , _a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor(
[[-2.7_939, -1.7_902, -2.2_189], [-2.8_585, -1.8_908, -2.3_723], [-2.6_499, -1.7_750, -2.2_558]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _a , atol=1E-4 ) )
| 365
|
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12_312
    sock.connect((host, port))
    sock.send(b'Hello server!')
    with open('Received_file', 'wb') as out_file:
        print('File opened')
        print('Receiving data...')
        while True:
            data = sock.recv(1_024)
            if not data:
                break
            out_file.write(data)
    print('Successfully received the file')
    sock.close()
    print('Connection closed')
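# A matching server sketch (assumed counterpart for illustration, not part of
# this script): it binds to the same port, accepts one client, and streams a
# file back until EOF closes the connection.
#     server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#     server.bind((socket.gethostname(), 12_312))
#     server.listen(1)
#     conn, addr = server.accept()
#     with open('file_to_send', 'rb') as f:
#         conn.sendfile(f)
#     conn.close()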
if __name__ == "__main__":
main()
| 162
| 0
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
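# Sanity check: the origin x = y = 0 never diverges (a and b stay 0), so the
# loop runs to completion and the function returns
# (max_step - 1) / (max_step - 1) = 1.0 -- exactly the value both coloring
# functions below paint black.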
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(image_width: int = 8_00, image_height: int = 6_00, figure_center_x: float = -0.6, figure_center_y: float = 0, figure_width: float = 3.2, max_step: int = 50, use_distance_color_coding: bool = True):
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a_ = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset):
    # An iterable dataset of random length (at most max_length), to simulate
    # iterable datasets whose size is not known in advance.
    def __init__(self, p_stop=0.01, max_length=1000):
        self.p_stop = p_stop
        self.max_length = max_length

    def __iter__(self):
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class DataLoaderTester(unittest.TestCase):
    def check_batch_sampler_shards(self, batch_sampler, expected, split_batches=False, even_batches=True):
        batch_sampler_shards = [
            BatchSamplerShard(batch_sampler, 2, i, split_batches=split_batches, even_batches=even_batches)
            for i in range(2)
        ]
        batch_sampler_lists = [list(batch_sampler_shard) for batch_sampler_shard in batch_sampler_shards]
        if not split_batches:
            self.assertListEqual([len(shard) for shard in batch_sampler_shards], [len(e) for e in expected])
        self.assertListEqual(batch_sampler_lists, expected)
    def test_batch_sampler_shards_with_no_splits(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1, 0]], [[1, 0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected)
    def test_batch_sampler_shards_with_splits(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], [[0, 1]]]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True)
    def test_batch_sampler_shards_with_no_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of total batch size.
        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=3, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is a round multiple of batch size but not total batch size.
        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size but has a multiple of
        # num_processes batch.
        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size and does not have a
        # multiple of num_processes batch.
        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=False)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(20), batch_size=3, drop_last=True)
        expected = [
            [[0, 1, 2], [6, 7, 8], [12, 13, 14]],
            [[3, 4, 5], [9, 10, 11], [15, 16, 17]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=3, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, even_batches=False)
    def test_batch_sampler_shards_with_splits_no_even(self):
        # Check the shards when the dataset is a round multiple of batch size.
        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(24), batch_size=4, drop_last=True)
        # Expected shouldn't change
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size.
        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(22), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is not a round multiple of batch size or num_processes.
        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=False)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(21), batch_size=4, drop_last=True)
        expected = [
            [[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
            [[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
        ]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        # Check the shards when the dataset is very small.
        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=False)
        expected = [[[0, 1]], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)

        batch_sampler = BatchSampler(range(2), batch_size=4, drop_last=True)
        expected = [[], []]
        self.check_batch_sampler_shards(batch_sampler, expected, split_batches=True, even_batches=False)
    def test_batch_sampler_with_varying_batch_size(self):
        batch_sampler = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
        batch_sampler_shards = [BatchSamplerShard(batch_sampler, 2, i, even_batches=False) for i in range(2)]

        self.assertEqual(len(batch_sampler_shards[0]), 3)
        self.assertEqual(len(batch_sampler_shards[1]), 2)

        self.assertListEqual(list(batch_sampler_shards[0]), [[0, 1, 2], [5, 6, 7, 8], [12, 13]])
        self.assertListEqual(list(batch_sampler_shards[1]), [[3, 4], [9, 10, 11]])
    def check_iterable_dataset_shards(self, dataset, seed, batch_size, drop_last=False, num_processes=2, split_batches=False):
        random.seed(seed)
        reference = list(dataset)

        iterable_dataset_shards = [
            IterableDatasetShard(
                dataset, batch_size=batch_size, drop_last=drop_last, num_processes=num_processes, process_index=i, split_batches=split_batches, )
            for i in range(num_processes)
        ]
        iterable_dataset_lists = []
        for iterable_dataset_shard in iterable_dataset_shards:
            # Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
            random.seed(seed)
            iterable_dataset_lists.append(list(iterable_dataset_shard))

        shard_batch_size = batch_size // num_processes if split_batches else batch_size
        # All iterable dataset shard should have the same length, a round multiple of shard_batch_size
        first_list = iterable_dataset_lists[0]
        for l in iterable_dataset_lists[1:]:
            self.assertEqual(len(l), len(first_list))
            self.assertTrue(len(l) % shard_batch_size == 0)

        observed = []
        for idx in range(0, len(first_list), shard_batch_size):
            for l in iterable_dataset_lists:
                observed += l[idx : idx + shard_batch_size]

        if not drop_last:
            while len(reference) < len(observed):
                reference += reference
        self.assertListEqual(observed, reference[: len(observed)])
    def test_iterable_dataset_shard(self):
        seed = 42
        dataset = RandomIterableDataset()

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)

        # Edge case with a very small dataset
        dataset = RandomIterableDataset(max_length=2)

        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=False)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=False, split_batches=True)
        self.check_iterable_dataset_shards(dataset, seed, batch_size=4, drop_last=True, split_batches=True)
    def test_skip_batch_sampler(self):
        batch_sampler = BatchSampler(range(16), batch_size=4, drop_last=False)
        new_batch_sampler = SkipBatchSampler(batch_sampler, 2)
        self.assertListEqual(list(new_batch_sampler), [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_data_loader(self):
        dataloader = SkipDataLoader(list(range(16)), batch_size=4, skip_batches=2)
        self.assertListEqual([t.tolist() for t in dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_skip_first_batches(self):
        dataloader = DataLoader(list(range(16)), batch_size=4)
        new_dataloader = skip_first_batches(dataloader, num_batches=2)
        self.assertListEqual([t.tolist() for t in new_dataloader], [[8, 9, 10, 11], [12, 13, 14, 15]])

    def test_end_of_dataloader(self):
        dataloader = DataLoaderShard(list(range(16)), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

    def test_end_of_dataloader_dispatcher(self):
        Accelerator()
        dataloader = DataLoaderDispatcher(range(16), batch_size=4)
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)

        # Test it also works on the second iteration
        for idx, _ in enumerate(dataloader):
            self.assertEqual(dataloader.end_of_dataloader, idx == 3)
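
# Illustrative sketch (not part of the test suite above): BatchSamplerShard hands
# alternating batches of a single global BatchSampler to each process, rather than
# splitting the index range contiguously.
if __name__ == "__main__":
    sampler = BatchSampler(range(12), batch_size=3, drop_last=False)
    for process_index in range(2):
        shard = BatchSamplerShard(sampler, 2, process_index)
        print(process_index, list(shard))
    # prints:
    # 0 [[0, 1, 2], [6, 7, 8]]
    # 1 [[3, 4, 5], [9, 10, 11]]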
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class BertGenerationEncoderTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=50, initializer_range=0.02, use_labels=True, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.use_labels = use_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return BertGenerationConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, is_decoder=False, initializer_range=self.initializer_range, )
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, token_labels, **kwargs):
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.add_cross_attention = True
        model = BertGenerationEncoder(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, )
        result = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, )
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, **kwargs):
        config.is_decoder = True
        config.add_cross_attention = True
        model = BertGenerationDecoder(config=config).to(torch_device).eval()

        # first forward pass
        outputs = model(
            input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels, *args):
        model = BertGenerationDecoder(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class BertGenerationEncoderTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
    all_generative_model_classes = (BertGenerationDecoder,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": BertGenerationEncoder, "text-generation": BertGenerationDecoder}
        if is_torch_available()
        else {}
    )

    def setUp(self):
        self.model_tester = BertGenerationEncoderTester(self)
        self.config_tester = ConfigTester(self, config_class=BertGenerationConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_bert(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        config.model_type = "bert"
        self.model_tester.create_and_check_model(config, input_ids, input_mask, token_labels)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_model_as_decoder_with_default_input_mask(self):
        (
            config,
            input_ids,
            input_mask,
            token_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        ) = self.model_tester.prepare_config_and_inputs_for_decoder()

        input_mask = None

        self.model_tester.create_and_check_model_as_decoder(
            config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, )

    def test_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        self.assertIsNotNone(model)
@require_torch
class BertGenerationEncoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 1024])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@require_torch
class BertGenerationDecoderIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = BertGenerationDecoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
        input_ids = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 10140, 102]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size([1, 8, 50358])
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]])
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
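
# Illustrative sketch (an assumption, not part of the test file): the encoder and
# decoder classes exercised above are typically paired via EncoderDecoderModel for
# sequence-to-sequence generation, e.g.:
#
#     from transformers import EncoderDecoderModel
#     encoder = BertGenerationEncoder.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder", bos_token_id=101, eos_token_id=102)
#     decoder = BertGenerationDecoder.from_pretrained(
#         "google/bert_for_seq_generation_L-24_bbc_encoder",
#         add_cross_attention=True, is_decoder=True, bos_token_id=101, eos_token_id=102)
#     model = EncoderDecoderModel(encoder=encoder, decoder=decoder)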
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_value: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = True, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = "", **kwargs, ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Union[float, Iterable[float]] = None, image_std: Union[float, Iterable[float]] = None, apply_ocr: bool = None, ocr_lang: Optional[str] = None, tesseract_config: Optional[str] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray.")
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
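
# Minimal usage sketch (illustrative; the class name is inferred from the imports
# above and "document.png" is a hypothetical input file):
#
#     processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#     encoding = processor(PIL.Image.open("document.png"), return_tensors="pt")
#     encoding["pixel_values"].shape          # (1, 3, 224, 224) with the default size
#     encoding["words"], encoding["boxes"]    # OCR'd words + normalized bounding boxes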
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class StableDiffusionPanoramaPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionPanoramaPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, )
        scheduler = DDIMScheduler()
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            # Setting height and width to None to prevent OOMs on CPU.
            'height': None,
            'width': None,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_panorama_default_case(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        super().test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(batch_size=2, expected_max_diff=3.25e-3)
    def test_stable_diffusion_panorama_negative_prompt(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = 'french fries'
        output = sd_pipe(**inputs, negative_prompt=negative_prompt)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_views_batch(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = sd_pipe(**inputs, view_batch_size=2)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_euler(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = EulerAncestralDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear')
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_panorama_pndm(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components['scheduler'] = PNDMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule='scaled_linear', skip_prk_steps=True)
        sd_pipe = StableDiffusionPanoramaPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionPanoramaSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        inputs = {
            'prompt': 'a photo of the dolomites',
            'generator': generator,
            'num_inference_steps': 3,
            'guidance_scale': 7.5,
            'output_type': 'numpy',
        }
        return inputs
    def test_stable_diffusion_panorama_default(self):
        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array(
            [
                0.36968392,
                0.27025372,
                0.32446766,
                0.28379387,
                0.36363274,
                0.30733347,
                0.27100027,
                0.27054125,
                0.25536096,
            ])
        assert np.abs(expected_slice - image_slice).max() < 1e-2
    def test_stable_diffusion_panorama_k_lms(self):
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-base', safety_checker=None)
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 512, 2048, 3)

        expected_slice = np.array([[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]])
        assert np.abs(expected_slice - image_slice).max() < 1e-3
    def test_stable_diffusion_panorama_intermediate_state(self):
        number_of_steps = 0

        def callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18681869,
                        0.33907816,
                        0.5361276,
                        0.14432865,
                        -0.02856611,
                        -0.73941123,
                        0.23397987,
                        0.47322682,
                        -0.37823164,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2
            elif step == 2:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 256)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [
                        0.18539645,
                        0.33987248,
                        0.5378559,
                        0.14437142,
                        -0.02455261,
                        -0.7338317,
                        0.23990755,
                        0.47356272,
                        -0.3786505,
                    ])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 5e-2

        callback_fn.has_been_called = False

        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        inputs = self.get_inputs()
        pipe(**inputs, callback=callback_fn, callback_steps=1)
        assert callback_fn.has_been_called
        assert number_of_steps == 3
    def test_stable_diffusion_panorama_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        model_ckpt = 'stabilityai/stable-diffusion-2-base'
        scheduler = DDIMScheduler.from_pretrained(model_ckpt, subfolder='scheduler')
        pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_ckpt, scheduler=scheduler, safety_checker=None)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        inputs = self.get_inputs()
        _ = pipe(**inputs)

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 5.5 GB is allocated
        assert mem_bytes < 5.5 * 10**9
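
# Usage sketch (illustrative, mirroring the slow tests above): generate a wide
# panorama from a text prompt on a CUDA device.
#
#     scheduler = DDIMScheduler.from_pretrained(
#         "stabilityai/stable-diffusion-2-base", subfolder="scheduler")
#     pipe = StableDiffusionPanoramaPipeline.from_pretrained(
#         "stabilityai/stable-diffusion-2-base", scheduler=scheduler, safety_checker=None
#     ).to("cuda")
#     image = pipe("a photo of the dolomites").images[0]  # 512x2048 panorama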
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    '''simple docstring'''

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def UpperCamelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , **UpperCAmelCase ) ->Dict:
"""simple docstring"""
a_ = [x.strip() for x in open(UpperCAmelCase ).readlines()]
a_ = [x.strip() for x in open(UpperCAmelCase ).readlines()][: len(UpperCAmelCase )]
a_ = calculate_rouge(UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase )
if save_path is not None:
save_json(UpperCAmelCase , UpperCAmelCase , indent=UpperCAmelCase )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
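
# Example invocation via python-fire (file names are hypothetical):
#
#     python rouge_cli.py predictions.txt references.txt --save_path rouge.json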
"""simple docstring"""
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class LiltModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=24, num_hidden_layers=2, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, scope=None, range_bbox=1000, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.range_bbox = range_bbox
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels

    def get_config(self):
        return LiltConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , ) ->Any:
a_ = LiltModel(config=__UpperCAmelCase)
model.to(__UpperCAmelCase)
model.eval()
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , attention_mask=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase , token_type_ids=__UpperCAmelCase)
a_ = model(__UpperCAmelCase , bbox=__UpperCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LiltForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, bbox=bbox, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LiltForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
    all_model_classes = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True
    def setUp(self):
        self.model_tester = LiltModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LiltConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LiltModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@slow
class LiltModelIntegrationTest(unittest.TestCase):
    def test_inference_no_head(self):
        model = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base").to(torch_device)
        input_ids = torch.tensor([[1, 2]], device=torch_device)
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]], device=torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids, bbox=bbox)
        expected_shape = torch.Size([1, 2, 768])
        expected_slice = torch.tensor(
            [[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]], device=torch_device
        )
        # assertEqual (not assertTrue) so a shape mismatch actually fails the test
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3], expected_slice, atol=1e-3))
| 303
| 0
|
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class BitModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[8, 16, 32, 64],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        num_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(depths)
        self.out_features = out_features
        self.out_indices = out_indices
        self.num_groups = num_groups
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = BitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
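        # height/width shrink 32x overall: a stride-4 stem followed by three stride-2 stages (4 * 2**3 = 32)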
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = BitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])
        # verify backbone works with out_features=None
        config.out_features = None
        model = BitBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])
        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : Dict = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
__lowerCamelCase : Tuple = (
{'''feature-extraction''': BitModel, '''image-classification''': BitForImageClassification}
if is_torch_available()
else {}
)
__lowerCamelCase : int = False
__lowerCamelCase : Optional[Any] = False
__lowerCamelCase : Optional[int] = False
__lowerCamelCase : List[str] = False
__lowerCamelCase : List[str] = False
    def setUp(self):
        self.model_tester = BitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BitConfig, has_text_modality=False)
def a_ ( self : Optional[int] ) -> Any:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def a_ ( self : str ) -> Dict:
"""simple docstring"""
return
@unittest.skip(reason="""Bit does not output attentions""" )
def a_ ( self : Dict ) -> str:
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not use inputs_embeds""" )
def a_ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""Bit does not support input and output embeddings""" )
def a_ ( self : Optional[Any] ) -> Optional[Any]:
"""simple docstring"""
pass
def a_ ( self : str ) -> Optional[int]:
"""simple docstring"""
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A__ = model_class(__lowerCAmelCase )
A__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A__ = [*signature.parameters.keys()]
A__ = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __lowerCAmelCase )
def a_ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def a_ ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__lowerCAmelCase )
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f'Parameter {name} of model {model_class} seems not properly initialized',
                    )
def a_ ( self : Tuple ) -> List[str]:
"""simple docstring"""
def check_hidden_states_output(__lowerCAmelCase : str , __lowerCAmelCase : Optional[Any] , __lowerCAmelCase : Union[str, Any] ):
A__ = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
A__ = model(**self._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase ) )
A__ = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A__ = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common()
A__ = ["""preactivation""", """bottleneck"""]
for model_class in self.all_model_classes:
for layer_type in layers_type:
A__ = layer_type
A__ = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A__ = True
check_hidden_states_output(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
@unittest.skip(reason="""Bit does not use feedforward chunking""" )
def a_ ( self : Tuple ) -> str:
"""simple docstring"""
pass
def a_ ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
    def test_model_from_pretrained(self):
        for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class A (unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor(self):
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
    def test_inference_image_classification_head(self):
        model = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-0.6526, -0.5263, -1.4398]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
@require_torch
class A (SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
__lowerCamelCase : str = (BitBackbone,) if is_torch_available() else ()
__lowerCamelCase : str = BitConfig
__lowerCamelCase : List[Any] = False
def a_ ( self : List[Any] ) -> List[Any]:
"""simple docstring"""
A__ = BitModelTester(self )
| 274
|
from math import ceil
def solution(n: int = 1001) -> int:
    """Returns the sum of the numbers on the diagonals of an n by n number spiral."""
    # ring i has side length 2*i + 1; its four corner values sum to
    # 4*(2*i + 1)**2 - 12*i, i.e. 4*odd**2 - 6*even with odd = 2*i + 1 and even = 2*i
    total = 1
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1
        even = 2 * i
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
            n = int(sys.argv[1])
            print(solution(n))
except ValueError:
print('''Invalid entry - please enter a number''')
| 274
| 1
|
"""simple docstring"""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    """A grid cell together with the costs A* uses to rank it."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        # manhattan distance if HEURISTIC == 1, euclidean distance otherwise
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    """Classic A* search over the module-level ``grid``."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Returns a list of legal successor nodes of ``parent``."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Retrace the path from ``node`` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    """Runs a forward and a backward A* simultaneously and joins the paths when they meet."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            # each search aims at the frontier node of the other one
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
SCREAMING_SNAKE_CASE_ : Any = (0, 0)
SCREAMING_SNAKE_CASE_ : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
SCREAMING_SNAKE_CASE_ : List[str] = time.time()
SCREAMING_SNAKE_CASE_ : int = AStar(init, goal)
SCREAMING_SNAKE_CASE_ : int = a_star.search()
SCREAMING_SNAKE_CASE_ : Any = time.time() - start_time
print(f"""AStar execution time = {end_time:f} seconds""")
SCREAMING_SNAKE_CASE_ : Any = time.time()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = BidirectionalAStar(init, goal)
SCREAMING_SNAKE_CASE_ : List[Any] = time.time() - bd_start_time
print(f"""BidirectionalAStar execution time = {bd_end_time:f} seconds""")
| 69
|
"""simple docstring"""
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class SentencePieceUnigramTokenizer(BaseTokenizer):
    """Custom SentencePiece Unigram tokenizer with NMT, NFKC, multi-space and lower-casing normalization."""
    def __init__(
        self,
        replacement: str = "▁",
        add_prefix_space: bool = True,
        unk_token: Union[str, AddedToken] = "<unk>",
        eos_token: Union[str, AddedToken] = "</s>",
        pad_token: Union[str, AddedToken] = "<pad>",
    ):
        self.special_tokens = {
            "pad": {"id": 0, "token": pad_token},
            "eos": {"id": 1, "token": eos_token},
            "unk": {"id": 2, "token": unk_token},
        }
        self.special_tokens_list = [None] * len(self.special_tokens)
        for token_dict in self.special_tokens.values():
            self.special_tokens_list[token_dict["id"]] = token_dict["token"]
        tokenizer = Tokenizer(Unigram())
        tokenizer.normalizer = normalizers.Sequence(
            [
                normalizers.Nmt(),
                normalizers.NFKC(),
                normalizers.Replace(Regex(" {2,}"), " "),
                normalizers.Lowercase(),
            ]
        )
        tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
            [
                pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space),
                pre_tokenizers.Digits(individual_digits=True),
                pre_tokenizers.Punctuation(),
            ]
        )
        tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space)
        tokenizer.post_processor = TemplateProcessing(
            single=f"$A {self.special_tokens['eos']['token']}",
            special_tokens=[(self.special_tokens["eos"]["token"], self.special_tokens["eos"]["id"])],
        )
        parameters = {
            "model": "SentencePieceUnigram",
            "replacement": replacement,
            "add_prefix_space": add_prefix_space,
        }
        super().__init__(tokenizer, parameters)
    def train(
        self,
        files: Union[str, List[str]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given files."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        if isinstance(files, str):
            files = [files]
        self._tokenizer.train(files, trainer=trainer)
        self.add_unk_id()
    def train_from_iterator(
        self,
        iterator: Union[Iterator[str], Iterator[Iterator[str]]],
        vocab_size: int = 8000,
        show_progress: bool = True,
    ):
        """Train the model using the given iterator."""
        trainer = trainers.UnigramTrainer(
            vocab_size=vocab_size,
            special_tokens=self.special_tokens_list,
            show_progress=show_progress,
        )
        self._tokenizer.train_from_iterator(iterator, trainer=trainer)
        self.add_unk_id()
    def add_unk_id(self):
        """Write the unk token id into the serialized tokenizer model."""
        tokenizer_json = json.loads(self._tokenizer.to_str())
        tokenizer_json["model"]["unk_id"] = self.special_tokens["unk"]["id"]
        self._tokenizer = Tokenizer.from_str(json.dumps(tokenizer_json))
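# Minimal usage sketch (the file names below are assumptions, not part of this module):
#     tokenizer = SentencePieceUnigramTokenizer()
#     tokenizer.train(["corpus.txt"], vocab_size=8000)
#     tokenizer.save("tokenizer.json")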
| 69
| 1
|
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images from random uint8 arrays moved to channels-last layout."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ )
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCamelCase = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowerCamelCase = self.get_image_processor(do_normalize=UpperCamelCase_ )
__lowerCamelCase = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase_ )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = image_processor(UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = processor(images=UpperCamelCase_ , return_tensors="""np""" )
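        # comparing per-key sums keeps the check cheap while still catching processor vs. image-processor drift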
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCAmelCase__ ( self: Dict ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = """lower newer"""
__lowerCamelCase = processor(text=UpperCamelCase_ , return_tensors="""np""" )
__lowerCamelCase = tokenizer(UpperCamelCase_ , return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = """lower newer"""
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(text=UpperCamelCase_ , images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: Optional[Any] ):
__lowerCamelCase = """google/owlvit-base-patch32"""
__lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = ["""cat""", """nasa badge"""]
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = 16
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: Any ):
__lowerCamelCase = """google/owlvit-base-patch32"""
__lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = [["""cat""", """nasa badge"""], ["""person"""]]
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = 16
__lowerCamelCase = len(UpperCamelCase_ )
__lowerCamelCase = max([len(UpperCamelCase_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = """google/owlvit-base-patch32"""
__lowerCamelCase = OwlViTProcessor.from_pretrained(UpperCamelCase_ )
__lowerCamelCase = ["""cat""", """nasa badge"""]
__lowerCamelCase = processor(text=UpperCamelCase_ )
__lowerCamelCase = 16
__lowerCamelCase = inputs["""input_ids"""]
__lowerCamelCase = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = self.prepare_image_inputs()
__lowerCamelCase = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase_ ):
processor()
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.get_image_processor()
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ )
__lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCamelCase = processor.batch_decode(UpperCamelCase_ )
__lowerCamelCase = tokenizer.batch_decode(UpperCamelCase_ )
self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
| 12
|
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class A__ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]
def snake_case_ ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Any:
'''simple docstring'''
A_ = generator("""Something there""" )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": ANY(UpperCamelCase__ )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["""generated_text"""].startswith("""Something there""" ) )
A_ = generator(["""This is great !""", """Something else"""] , num_return_sequences=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
A_ = generator(
["""This is great !""", """Something else"""] , num_return_sequences=2 , batch_size=2 , do_sample=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
[{"""generated_text""": ANY(UpperCamelCase__ )}, {"""generated_text""": ANY(UpperCamelCase__ )}],
] , )
with self.assertRaises(UpperCamelCase__ ):
generator(4 )
@require_torch
def snake_case_ ( self ) -> Dict:
'''simple docstring'''
A_ = pipeline("""text2text-generation""" , model="""patrickvonplaten/t5-tiny-random""" , framework="""pt""" )
# do_sample=False necessary for reproducibility
A_ = generator("""Something there""" , do_sample=UpperCamelCase__ )
self.assertEqual(UpperCamelCase__ , [{"""generated_text""": """"""}] )
A_ = 3
A_ = generator(
"""Something there""" , num_return_sequences=UpperCamelCase__ , num_beams=UpperCamelCase__ , )
A_ = [
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """Beide Beide Beide Beide Beide Beide Beide Beide"""},
{"""generated_text""": """"""},
]
self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
A_ = generator("""This is a test""" , do_sample=UpperCamelCase__ , num_return_sequences=2 , return_tensors=UpperCamelCase__ )
self.assertEqual(
UpperCamelCase__ , [
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
] , )
A_ = generator.model.config.eos_token_id
A_ = """<pad>"""
A_ = generator(
["""This is a test""", """This is a second test"""] , do_sample=UpperCamelCase__ , num_return_sequences=2 , batch_size=2 , return_tensors=UpperCamelCase__ , )
self.assertEqual(
UpperCamelCase__ , [
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
[
{"""generated_token_ids""": ANY(torch.Tensor )},
{"""generated_token_ids""": ANY(torch.Tensor )},
],
] , )
@require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 162
| 0
|
'''simple docstring'''
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class BlenderbotSmallTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()
        vocab = ["__start__", "adapt", "act", "ap@@", "te", "__end__", "__unk__"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "t e</w>", "ap t</w>", "a d", "ad apt</w>", "a c", "ac t</w>", ""]
        self.special_tokens_map = {"unk_token": "__unk__", "bos_token": "__start__", "eos_token": "__end__"}
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname, **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "adapt act apte"
        output_text = "adapt act apte"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt act apte"
        bpe_tokens = ["adapt", "act", "ap@@", "te"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_special_tokens_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        assert tok("sam").input_ids == [1384]
        src_text = "I am a small frog."
        encoded = tok([src_text], padding=False, truncation=True)["input_ids"]
        decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=True)[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."
    def test_empty_word_small_tok(self):
        tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")
        src_text = "I am a small frog ."
        src_text_dot = "."
        encoded = tok(src_text)["input_ids"]
        encoded_dot = tok(src_text_dot)["input_ids"]
        assert encoded[-1] == encoded_dot[0]
| 98
|
'''simple docstring'''
from pathlib import Path
import fire
from tqdm import tqdm
def UpperCamelCase ( a="ro" , a="en" , a="wmt16" , a=None ) -> None:
'''simple docstring'''
try:
import datasets
except (ModuleNotFoundError, ImportError):
raise ImportError('''run pip install datasets''' )
__magic_name__ = F'''{src_lang}-{tgt_lang}'''
print(F'''Converting {dataset}-{pair}''' )
__magic_name__ = datasets.load_dataset(a , a )
if save_dir is None:
__magic_name__ = F'''{dataset}-{pair}'''
__magic_name__ = Path(a )
save_dir.mkdir(exist_ok=a )
for split in ds.keys():
print(F'''Splitting {split} with {ds[split].num_rows} records''' )
# to save to val.source, val.target like summary datasets
__magic_name__ = '''val''' if split == '''validation''' else split
__magic_name__ = save_dir.joinpath(F'''{fn}.source''' )
__magic_name__ = save_dir.joinpath(F'''{fn}.target''' )
__magic_name__ = src_path.open('''w+''' )
__magic_name__ = tgt_path.open('''w+''' )
# reader is the bottleneck so writing one record at a time doesn't slow things down
for x in tqdm(ds[split] ):
__magic_name__ = x['''translation''']
src_fp.write(ex[src_lang] + '''\n''' )
tgt_fp.write(ex[tgt_lang] + '''\n''' )
print(F'''Saved {dataset} dataset to {save_dir}''' )
if __name__ == "__main__":
fire.Fire(download_wmt_dataset)
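# Example invocation (the script name is an assumption): python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16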
| 98
| 1
|
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels
        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = num_frames * self.num_patches_per_frame + 1
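        # e.g. image_size=10, patch_size=2, num_frames=2 gives 25 patches per frame and 2 * 25 + 1 = 51 tokens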
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_frames=self.num_frames, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, attention_type=self.attention_type, )
        config.num_labels = self.num_labels
        return config
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Dict:
a =TimesformerModel(config=__A )
model.to(__A )
model.eval()
a =model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> Union[str, Any]:
a =TimesformerForVideoClassification(__A )
model.to(__A )
model.eval()
a =model(__A )
# verify the logits shape
a =torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __A )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class __A ( _SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE, unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
__lowerCAmelCase = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
__lowerCAmelCase = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )
def SCREAMING_SNAKE_CASE ( self , __A , __A , __A=False ) -> str:
a =copy.deepcopy(__A )
if return_labels:
if model_class in get_values(__A ):
a =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def SCREAMING_SNAKE_CASE ( self ) -> str:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(__A )
a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =['''pixel_values''']
self.assertListEqual(arg_names[:1] , __A )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]:
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__A )
@slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def SCREAMING_SNAKE_CASE ( self ) -> str:
if not self.has_attentions:
pass
else:
a , a =self.model_tester.prepare_config_and_inputs_for_common()
a =True
for model_class in self.all_model_classes:
a =self.model_tester.seq_length
a =self.model_tester.num_frames
a =True
a =False
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
a =len(__A )
# Check attention is always last and order is fine
a =True
a =True
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
a =outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
def check_hidden_states_output(__A , __A , __A ):
a =model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
a =model(**self._prepare_for_class(__A , __A ) )
a =outputs.hidden_states
a =self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__A ) , __A )
a =self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a =True
check_hidden_states_output(__A , __A , __A )
def prepare_video():
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(file)
    return list(video)
@require_torch
@require_vision
class __A ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor(self):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )
        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 81
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SwiftFormerModelTester:
def __init__(self , lowercase , lowercase=13 , lowercase=3 , lowercase=True , lowercase=True , lowercase=0.1 , lowercase=0.1 , lowercase=224 , lowercase=1000 , lowercase=[3, 3, 6, 4] , lowercase=[48, 56, 112, 220] , ):
A_ : Dict = parent
A_ : List[Any] = batch_size
A_ : Dict = num_channels
A_ : Optional[Any] = is_training
A_ : List[str] = use_labels
A_ : List[Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : Tuple = num_labels
A_ : List[str] = image_size
A_ : str = layer_depths
A_ : Optional[int] = embed_dims
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="""gelu""" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowercase , layer_scale_init_value=1E-5 , )
def _a (self , lowercase , lowercase , lowercase ):
A_ : List[Any] = SwiftFormerModel(config=lowercase )
model.to(lowercase )
model.eval()
A_ : Union[str, Any] = model(lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def _a (self , lowercase , lowercase , lowercase ):
A_ : Any = self.num_labels
A_ : Any = SwiftFormerForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : Optional[int] = model(lowercase , labels=lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
A_ : int = SwiftFormerForImageClassification(lowercase )
model.to(lowercase )
model.eval()
A_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Dict = model(lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def prepare_config_and_inputs_for_common(self):
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class _lowerCAmelCase ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
__SCREAMING_SNAKE_CASE : int = False
__SCREAMING_SNAKE_CASE : List[Any] = False
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
__SCREAMING_SNAKE_CASE : Union[str, Any] = False
    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self,
            config_class=SwiftFormerConfig,
            has_text_modality=False,
            hidden_size=37,
            num_attention_heads=12,
            num_hidden_layers=12,
        )
def _a (self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""SwiftFormer does not use inputs_embeds""" )
def _a (self ):
pass
def _a (self ):
A_, A_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[Any] = model_class(lowercase )
A_ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase , nn.Linear ) )
def _a (self ):
A_, A_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(lowercase )
A_ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : int = [*signature.parameters.keys()]
A_ : Tuple = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowercase )
def _a (self ):
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase )
def _a (self ):
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase )
@slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@unittest.skip(reason="""SwiftFormer does not output attentions""" )
def _a (self ):
pass
def _a (self ):
def check_hidden_states_output(lowercase , lowercase , lowercase ):
A_ : str = model_class(lowercase )
model.to(lowercase )
model.eval()
with torch.no_grad():
A_ : Optional[int] = model(**self._prepare_for_class(lowercase , lowercase ) )
A_ : Any = outputs.hidden_states
A_ : Any = 8
self.assertEqual(len(lowercase ) , lowercase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
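            # e.g. with image_size=224 the eight recorded maps are 56, 56, 28, 28, 14, 14, 7, 7 pixels square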
for i in range(len(lowercase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
A_, A_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A_ : str = True
check_hidden_states_output(lowercase , lowercase , lowercase )
    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a (self ):
pass
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
'''simple docstring'''
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
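# For example, key = "encoder.layers.0.conv" walks hf_pointer through .encoder,
# .layers and the child module registered under "0" before reaching .conv; with
# weight_type == "weight" the checkpoint tensor then lands in conv.weight.data.
# (Illustrative key only; getattr on an nn.ModuleList resolves string indices
# because submodules are registered under their string names.)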
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
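# A quick illustration of the three matching modes above (hedged sketch; these
# patterns are made up for demonstration, not taken from the real ignore list):
#   should_ignore("encoder.model.0.conv", ["encoder.model.*"])   -> True  (prefix wildcard)
#   should_ignore("quantizer.vq.layers.3.bias", ["vq.*.bias"])   -> True  (infix wildcard)
#   should_ignore("decoder.model.1.lstm", ["lstm"])              -> True  (plain substring)
#   should_ignore("decoder.model.1.lstm", ["conv"])              -> False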
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
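# Sanity check of the wildcard substitution above, with a made-up state-dict
# key: for name = "quantizer.vq.layers.3._codebook.embed" the pattern
# "quantizer.vq.layers.*._codebook.embed" first collapses key to the suffix
# "_codebook.embed"; name.split(key)[0].split(".")[-2] then yields "3", so the
# mapped key "quantizer.layers.*.codebook.embed" becomes
# "quantizer.layers.3.codebook.embed".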
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
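# Example invocation (a hedged sketch; assumes this script is saved as
# convert_encodec_checkpoint_to_pytorch.py and that the checkpoint path points
# at one of the files listed at the top of this script):
#
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_converted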
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
# Test that special tokens are reset
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,padding=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
# check if input_ids are returned and no labels
self.assertIn("""input_ids""",__SCREAMING_SNAKE_CASE )
self.assertIn("""attention_mask""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""labels""",__SCREAMING_SNAKE_CASE )
self.assertNotIn("""decoder_attention_mask""",__SCREAMING_SNAKE_CASE )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(text_target=__SCREAMING_SNAKE_CASE,max_length=32,padding="""max_length""",return_tensors="""pt""" )
self.assertEqual(32,targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(
["""I am a small frog""" * 10_24, """I am a small frog"""],padding=__SCREAMING_SNAKE_CASE,truncation=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
self.assertIsInstance(__SCREAMING_SNAKE_CASE,__SCREAMING_SNAKE_CASE )
self.assertEqual(batch.input_ids.shape,(2, 10_24) )
@require_torch
def lowerCamelCase__ ( self ):
'''simple docstring'''
__lowerCAmelCase = ["""A long paragraph for summarization."""]
__lowerCAmelCase = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__lowerCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE,text_target=__SCREAMING_SNAKE_CASE,return_tensors="""pt""" )
__lowerCAmelCase = inputs["""input_ids"""]
__lowerCAmelCase = inputs["""labels"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def lowerCamelCase__ ( self ):
'''simple docstring'''
pass
def lowerCamelCase__ ( self ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__lowerCAmelCase = self.rust_tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = self.tokenizer_class.from_pretrained(__SCREAMING_SNAKE_CASE,**__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = """A, <mask> AllenNLP sentence."""
__lowerCAmelCase = tokenizer_r.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
__lowerCAmelCase = tokenizer_p.encode_plus(__SCREAMING_SNAKE_CASE,add_special_tokens=__SCREAMING_SNAKE_CASE,return_token_type_ids=__SCREAMING_SNAKE_CASE )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["""token_type_ids"""] ),sum(tokens_p["""token_type_ids"""] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ),sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ),)
__lowerCAmelCase = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
__lowerCAmelCase = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""],[0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
__SCREAMING_SNAKE_CASE,["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "MIT/ast-finetuned-audioset-10-10-0.4593": (
        "https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"
    ),
}
class ASTConfig(PretrainedConfig):
    model_type = "audio-spectrogram-transformer"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        patch_size=16,
        qkv_bias=True,
        frequency_stride=10,
        time_stride=10,
        max_length=1024,
        num_mel_bins=128,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.qkv_bias = qkv_bias
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def load_config(config_path, display=False):
    config = OmegaConf.load(config_path)
    if display:
        print(yaml.dump(OmegaConf.to_container(config)))
    return config
def load_vqgan(device, conf_path=None, ckpt_path=None):
    # NOTE: the parameter names here are inferred from how they are used below
    # (map_location / model.to suggest a torch device); treat them as a best guess.
    if conf_path is None:
        conf_path = "./model_checkpoints/vqgan_only.yaml"
    config = load_config(conf_path, display=False)
    model = VQModel(**config.model.params)
    if ckpt_path is None:
        ckpt_path = "./model_checkpoints/vqgan_only.pt"
    sd = torch.load(ckpt_path, map_location=device)
    if ".ckpt" in ckpt_path:
        sd = sd["state_dict"]
    model.load_state_dict(sd, strict=True)
    model.to(device)
    del sd
    return model
def reconstruct_with_vqgan(x, model):
    z, _, _ = model.encode(x)
    print(f"VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}")
    xrec = model.decode(z)
    return xrec
def get_obj_from_str(string, reload=False):
    module, cls = string.rsplit(".", 1)
    if reload:
        module_imp = importlib.import_module(module)
        importlib.reload(module_imp)
    return getattr(importlib.import_module(module, package=None), cls)
def instantiate_from_config(config):
    if "target" not in config:
        raise KeyError("Expected key `target` to instantiate.")
    return get_obj_from_str(config["target"])(**config.get("params", {}))
def load_model_from_config(config, sd, gpu=True, eval_mode=True):
    model = instantiate_from_config(config)
    if sd is not None:
        model.load_state_dict(sd)
    if gpu:
        model.cuda()
    if eval_mode:
        model.eval()
    return {"model": model}
def load_model(config, ckpt, gpu, eval_mode):
    # load the specified checkpoint
    if ckpt:
        pl_sd = torch.load(ckpt, map_location="cpu")
        global_step = pl_sd["global_step"]
        print(f"loaded model from global step {global_step}.")
    else:
        pl_sd = {"state_dict": None}
        global_step = None
    model = load_model_from_config(config.model, pl_sd["state_dict"], gpu=gpu, eval_mode=eval_mode)["model"]
    return model, global_step
'''simple docstring'''
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "RUCAIBox/mvp": "https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json",
}


class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
'''simple docstring'''
from math import factorial
def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    if successes > trials:
        raise ValueError("successes must be lower or equal to trials")
    if trials < 0 or successes < 0:
        raise ValueError("the function is defined for non-negative integers")
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError("the function is defined for non-negative integers")
    if not 0 < prob < 1:
        raise ValueError("prob has to be in range of 1 - 0")
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient
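# Worked example matching the demo below: P(X = 2) for n = 4 trials with
# p = 0.75 is C(4, 2) * 0.75**2 * 0.25**2 = 6 * 0.5625 * 0.0625 = 0.2109375.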
if __name__ == "__main__":
from doctest import testmod
testmod()
print("Probability of 2 successes out of 4 trails")
print("with probability of 0.75 is:", end=" ")
print(binomial_distribution(2, 4, 0.7_5))
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
logger = logging.get_logger(__name__)
class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
"""simple docstring"""
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # array bounds provided by analysis
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]

    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    for idx, _p in enumerate(bounds, 1):
        if n < _p:
            # then we have our last prime to check
            plist = primes[:idx]
            break
    d, s = n - 1, 0
    # break up n -1 into a power of 2 (s) and
    # remaining odd component
    # essentially, solve for d * 2 ** s == n - 1
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # see article for analysis explanation for m
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True
                # this loop will not determine compositeness
                break
        if pr:
            continue
        # if pr is False, then the above loop never evaluated to true,
        # and the n MUST be composite
        return False
    return True
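# Worked decomposition for the first assertion below: n = 561 gives
# n - 1 = 560 = 35 * 2**4, i.e. d = 35 and s = 4. Since 561 = 3 * 11 * 17 is
# the smallest Carmichael number, the fixed witness bases above still expose it
# as composite.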
def test_miller_rabin() -> None:
assert not miller_rabin(561 )
assert miller_rabin(563 )
# 2047
assert not miller_rabin(838201 )
assert miller_rabin(838207 )
# 1_373_653
assert not miller_rabin(17316001 )
assert miller_rabin(17316017 )
# 25_326_001
assert not miller_rabin(3078386641 )
assert miller_rabin(3078386653 )
# 3_215_031_751
assert not miller_rabin(1713045574801 )
assert miller_rabin(1713045574819 )
# 2_152_302_898_747
assert not miller_rabin(2779799728307 )
assert miller_rabin(2779799728327 )
# 3_474_749_660_383
assert not miller_rabin(113850023909441 )
assert miller_rabin(113850023909527 )
# 341_550_071_728_321
assert not miller_rabin(1275041018848804351 )
assert miller_rabin(1275041018848804391 )
# 3_825_123_056_546_413_051
assert not miller_rabin(79666464458507787791867 )
assert miller_rabin(79666464458507787791951 )
# 318_665_857_834_031_151_167_461
assert not miller_rabin(552840677446647897660333 )
assert miller_rabin(552840677446647897660359 )
# 3_317_044_064_679_887_385_961_981
# upper limit for probabilistic test
if __name__ == "__main__":
test_miller_rabin()
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
if is_sentencepiece_available():
    from ..t5.tokenization_t5 import T5Tokenizer
else:
    from ...utils.dummy_sentencepiece_objects import T5Tokenizer

MT5Tokenizer = T5Tokenizer

if is_tokenizers_available():
    from ..t5.tokenization_t5_fast import T5TokenizerFast
else:
    from ...utils.dummy_tokenizers_objects import T5TokenizerFast

MT5TokenizerFast = T5TokenizerFast
lowercase : Any = {"""configuration_mt5""": ["""MT5Config""", """MT5OnnxConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mt5"] = [
"""MT5EncoderModel""",
"""MT5ForConditionalGeneration""",
"""MT5ForQuestionAnswering""",
"""MT5Model""",
"""MT5PreTrainedModel""",
"""MT5Stack""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : List[Any] = ["""TFMT5EncoderModel""", """TFMT5ForConditionalGeneration""", """TFMT5Model"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = ["""FlaxMT5EncoderModel""", """FlaxMT5ForConditionalGeneration""", """FlaxMT5Model"""]
if TYPE_CHECKING:
    from .configuration_mt5 import MT5Config, MT5OnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mt5 import (
            MT5EncoderModel,
            MT5ForConditionalGeneration,
            MT5ForQuestionAnswering,
            MT5Model,
            MT5PreTrainedModel,
            MT5Stack,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_mt5 import FlaxMT5EncoderModel, FlaxMT5ForConditionalGeneration, FlaxMT5Model
else:
import sys
    sys.modules[__name__] = _LazyModule(
__name__,
globals()["""__file__"""],
_import_structure,
extra_objects={"""MT5Tokenizer""": MTaTokenizer, """MT5TokenizerFast""": MTaTokenizerFast},
module_spec=__spec__,
)
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
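# The line above is a quine: "%r" re-inserts the format string's own repr and
# "%%" collapses to a single "%", so running the program prints a program that
# prints itself. (This comment is explanatory only and not part of the quine.)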
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[Literal["c", "python", "pyarrow"]] = None
    converters: Dict[Union[int, str], Callable[[Any], Any]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names

    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
'sep': self.sep,
'header': self.header,
'names': self.names,
'index_col': self.index_col,
'usecols': self.usecols,
'prefix': self.prefix,
'mangle_dupe_cols': self.mangle_dupe_cols,
'engine': self.engine,
'converters': self.converters,
'true_values': self.true_values,
'false_values': self.false_values,
'skipinitialspace': self.skipinitialspace,
'skiprows': self.skiprows,
'nrows': self.nrows,
'na_values': self.na_values,
'keep_default_na': self.keep_default_na,
'na_filter': self.na_filter,
'verbose': self.verbose,
'skip_blank_lines': self.skip_blank_lines,
'thousands': self.thousands,
'decimal': self.decimal,
'lineterminator': self.lineterminator,
'quotechar': self.quotechar,
'quoting': self.quoting,
'escapechar': self.escapechar,
'comment': self.comment,
'encoding': self.encoding,
'dialect': self.dialect,
'error_bad_lines': self.error_bad_lines,
'warn_bad_lines': self.warn_bad_lines,
'skipfooter': self.skipfooter,
'doublequote': self.doublequote,
'memory_map': self.memory_map,
'float_precision': self.float_precision,
'chunksize': self.chunksize,
'encoding_errors': self.encoding_errors,
'on_bad_lines': self.on_bad_lines,
'date_format': self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table

    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
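# Minimal usage sketch (hedged; "data.csv" is a placeholder path): any CsvConfig
# field above can be forwarded as a keyword argument through load_dataset, e.g.
#
#   from datasets import load_dataset
#   dataset = load_dataset("csv", data_files="data.csv", sep=";")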
"""simple docstring"""
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")


class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {"BertModelTest": "BertModelTester"}

        EXPECTED_BLIP_MAPPING = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), EXPECTED_BLIP_MAPPING)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        EXPECTED_BERT_MAPPING = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }

        EXPECTED_BLIP_MAPPING = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), EXPECTED_BERT_MAPPING)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), EXPECTED_BLIP_MAPPING)
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    "The RoBERTa Model transformer with early exiting (DeeRoBERTa). ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaModel(DeeBertModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = RobertaEmbeddings(config)
        self.init_weights()
@add_start_docstrings(
    "RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n"
    "also takes care of multi-layer training. ",
    ROBERTA_START_DOCSTRING,
)
class DeeRobertaForSequenceClassification(BertPreTrainedModel):
    config_class = RobertaConfig
    base_model_prefix = "roberta"

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers

        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)
    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )

            pooled_output = outputs[1]

            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)

            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer

        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    # getting number of pixels in the image
    pixel_h, pixel_v = img.shape[0], img.shape[1]

    # converting each pixel's color to its negative
    for i in range(pixel_h):
        for j in range(pixel_v):
            img[i][j] = [255, 255, 255] - img[i][j]

    return img
if __name__ == "__main__":
# read original image
    img = imread("image_data/lena.jpg", 1)
# convert to its negative
    neg = convert_to_negative(img)
# show result image
imshow('negative of original image', img)
waitKey(0)
destroyAllWindows()
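# A vectorized alternative (hedged sketch; assumes img is the NumPy uint8 array
# returned by imread): the per-pixel loop in convert_to_negative is equivalent
# to the single broadcast expression
#
#   neg = 255 - img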
"""simple docstring"""
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
SCREAMING_SNAKE_CASE__ = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir=None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers = []

    @staticmethod
    def read_magic_number(path, magic_number_length):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
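# Quick illustration of the magic-number dispatch (hedged; "archive.gz" is a
# placeholder path): GzipExtractor further below declares
# magic_numbers = [b"\x1F\x8B"], so GzipExtractor.is_extractable("archive.gz")
# is True exactly when the file starts with those two bytes.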
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path, output_path) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path, output_path) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
try:
# Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
# From: https://github.com/python/cpython/pull/5053
from zipfile import (
_CD_SIGNATURE,
_ECD_DISK_NUMBER,
_ECD_DISK_START,
_ECD_ENTRIES_TOTAL,
_ECD_OFFSET,
_ECD_SIZE,
_EndRecData,
sizeCentralDir,
stringCentralDir,
structCentralDir,
)
with open(lowercase , """rb""" ) as fp:
lowerCAmelCase = _EndRecData(lowercase )
if endrec:
if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
return True # Empty zipfiles are still zipfiles
elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
fp.seek(endrec[_ECD_OFFSET] ) # Central directory is on the same disk
if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                        data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                        if len(data) == sizeCentralDir:
                            centdir = struct.unpack(structCentralDir, data)  # CD is the right size
if centdir[_CD_SIGNATURE] == stringCentralDir:
return True # First central directory entry has correct magic number
return False
except Exception: # catch all errors in case future python versions change the zipfile internals
return False
    @staticmethod
    def _snake_case ( input_path , output_path ) -> None:
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , """r""" ) as zip_file:
            zip_file.extractall(output_path )
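# A minimal sketch of how the magic-number lists above are used: read the first
# bytes of a file and match them against known signatures. The helper name is
# hypothetical and the table repeats a subset of the signatures in this module.
def _sniff_archive_format_sketch(path ):
    signatures = {
        "gzip": [b"\x1F\x8B"],
        "zip": [b"PK\x03\x04", b"PK\x05\x06", b"PK\x07\x08"],
    }
    max_len = max(len(m ) for magics in signatures.values() for m in magics )
    with open(path , "rb" ) as f:
        header = f.read(max_len )
    for fmt, magics in signatures.items():
        if any(header.startswith(m ) for m in magics ):
            return fmt
    return None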
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = [B'\xFD\x37\x7A\x58\x5A\x00']
    @staticmethod
    def _snake_case ( input_path , output_path ) -> None:
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = [B'Rar!\x1a\x07\x00', B'Rar!\x1a\x07\x01\x00'] # RAR_ID # RAR5_ID
    @staticmethod
    def _snake_case ( input_path , output_path ) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile
        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = [B'\x28\xb5\x2F\xFD']
    @staticmethod
    def _snake_case ( input_path , output_path ) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path , """rb""" ) as ifh, open(output_path , """wb""" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = [B'\x42\x5A\x68']
    @staticmethod
    def _snake_case ( input_path , output_path ) -> None:
        with bz2.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = [B'\x37\x7A\xBC\xAF\x27\x1C']
    @staticmethod
    def _snake_case ( input_path , output_path ) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , """r""" ) as archive:
            archive.extractall(output_path )
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = [B'\x04\x22\x4D\x18']
    @staticmethod
    def _snake_case ( input_path , output_path ) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class lowercase :
    # Put the zip extractor last: other archive formats can be wrongly detected as zip.
    _SCREAMING_SNAKE_CASE = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _snake_case ( cls ) -> int:
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _snake_case ( path , magic_number_length ) -> bytes:
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def _snake_case ( cls , path , return_extractor = False ) -> bool:
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""" , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def _snake_case ( cls , path ) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format
    @classmethod
    def _snake_case ( cls , input_path , output_path , extractor_format = None , extractor = "deprecated" , ) -> None:
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""" , category=FutureWarning , )
                    extractor = extractor if extractor != """deprecated""" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
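# Usage sketch for the dispatcher above, following the original `datasets` API
# that this snippet mirrors (the class name and paths are illustrative only):
#     archive_format = Extractor.infer_extractor_format("downloads/file.tar.gz")
#     if archive_format:
#         Extractor.extract("downloads/file.tar.gz", "extracted/out", archive_format)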
| 46
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
"openai/imagegpt-small": "",
"openai/imagegpt-medium": "",
"openai/imagegpt-large": "",
}
class lowercase ( _UpperCAmelCase ):
_SCREAMING_SNAKE_CASE = 'imagegpt'
_SCREAMING_SNAKE_CASE = ['past_key_values']
_SCREAMING_SNAKE_CASE = {
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__( self , vocab_size=512 + 1 , n_positions=32 * 32 , n_embd=512 , n_layer=24 , n_head=8 , n_inner=None , activation_function="quick_gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , tie_word_embeddings=False , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ) -> Any:
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.tie_word_embeddings = tie_word_embeddings
        super().__init__(tie_word_embeddings=tie_word_embeddings , **kwargs )
class lowercase ( _UpperCAmelCase ):
@property
def _snake_case ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def _snake_case ( self , lowercase , lowercase = 1 , lowercase = -1 , lowercase = False , lowercase = None , lowercase = 3 , lowercase = 32 , lowercase = 32 , ) -> Mapping[str, Any]:
lowerCAmelCase = self._generate_dummy_images(lowercase , lowercase , lowercase , lowercase )
lowerCAmelCase = dict(preprocessor(images=lowercase , return_tensors=lowercase ) )
return inputs
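# Why the unusual defaults above: ImageGPT treats a 32x32 image as a sequence of
# 1024 pixel tokens drawn from 512 color clusters plus one start-of-sequence
# token, hence vocab_size=512 + 1 and n_positions=32 * 32. A usage sketch with
# the real transformers class this snippet mirrors:
#     from transformers import ImageGPTConfig
#     config = ImageGPTConfig()  # vocab_size=513, n_positions=1024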
| 46
| 1
|
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
UpperCamelCase__ : List[Any] = logging.get_logger(__name__)
def list_field( default=None , metadata=None ) -> Union[str, Any]:
    """simple docstring"""
    return field(default_factory=lambda: default , metadata=metadata )
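# Why the helper above exists: dataclasses reject bare mutable defaults such as
# `default=[8]`, so `list_field` routes the value through `default_factory`.
# A minimal sketch reusing the `dataclass`/`field`/`List` imports above; the
# `_Example` class is hypothetical:
@dataclass
class _Example:
    sizes: List[int] = list_field(default=[8] , metadata={"help": "batch sizes"} )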
@dataclass
class lowerCamelCase_ :
SCREAMING_SNAKE_CASE_ = list_field(
default=[] , metadata={
'help': (
'Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'
' of all available models'
)
} , )
SCREAMING_SNAKE_CASE_ = list_field(
default=[8] , metadata={'help': 'List of batch sizes for which memory and time performance will be evaluated'} )
SCREAMING_SNAKE_CASE_ = list_field(
default=[8, 32, 1_28, 5_12] , metadata={'help': 'List of sequence lengths for which memory and time performance will be evaluated'} , )
SCREAMING_SNAKE_CASE_ = field(
default=a_ , metadata={'help': 'Whether to benchmark inference of model. Inference can be disabled via --no-inference.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=a_ , metadata={'help': 'Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=a_ , metadata={'help': 'Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'} )
SCREAMING_SNAKE_CASE_ = field(default=a_ , metadata={'help': 'Use FP16 to accelerate inference.'} )
SCREAMING_SNAKE_CASE_ = field(default=a_ , metadata={'help': 'Benchmark training of model'} )
SCREAMING_SNAKE_CASE_ = field(default=a_ , metadata={'help': 'Verbose memory tracing'} )
SCREAMING_SNAKE_CASE_ = field(
default=a_ , metadata={'help': 'Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=a_ , metadata={
'help': 'Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'
} , )
SCREAMING_SNAKE_CASE_ = field(default=a_ , metadata={'help': 'Trace memory line by line'} )
SCREAMING_SNAKE_CASE_ = field(default=a_ , metadata={'help': 'Save result to a CSV file'} )
SCREAMING_SNAKE_CASE_ = field(default=a_ , metadata={'help': 'Save all print statements in a log file'} )
SCREAMING_SNAKE_CASE_ = field(default=a_ , metadata={'help': 'Whether to print environment information'} )
SCREAMING_SNAKE_CASE_ = field(
default=a_ , metadata={
'help': (
'Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'
' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'
' for debugging / testing and on TPU.'
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=f"inference_time_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving time results to csv.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=f"inference_memory_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving memory results to csv.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=f"train_time_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving time results to csv for training.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=f"train_memory_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving memory results to csv for training.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=f"env_info_{round(time() )}.csv" , metadata={'help': 'CSV filename used if saving environment information.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=f"log_{round(time() )}.csv" , metadata={'help': 'Log filename used if print statements are saved in log.'} , )
SCREAMING_SNAKE_CASE_ = field(default=3 , metadata={'help': 'Times an experiment will be run.'} )
SCREAMING_SNAKE_CASE_ = field(
default=a_ , metadata={
'help': (
'Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'
' model weights.'
)
} , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
        warnings.warn(
            F"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
def SCREAMING_SNAKE_CASE_ ( self : Any ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) ,indent=2 )
@property
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
| 367
|
import re
def SCREAMING_SNAKE_CASE__ ( dna ) -> str:
    """simple docstring"""
    if len(re.findall('''[ATCG]''', dna ) ) != len(dna ):
        raise ValueError('''Invalid Strand''' )
    return dna.translate(dna.maketrans('''ATCG''', '''TAGC''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
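# Worked examples for the translate-based complement above: "ATCG" maps to
# "TAGC" and "GGAT" maps to "CCTA" (A<->T, C<->G), while a strand containing a
# character outside ATCG, e.g. "ATXG", raises ValueError via the findall check.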
| 330
| 0
|
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class a__ ( lowercase__ ):
A = 'MCTCTFeatureExtractor'
A = 'AutoTokenizer'
    def __init__( self , feature_extractor , tokenizer ):
        """simple docstring"""
        super().__init__(feature_extractor , tokenizer )
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
    def __call__( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor(*args , **kwargs )
        if "raw_speech" in kwargs:
            warnings.warn("Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead." )
            audio = kwargs.pop("raw_speech" )
        else:
            audio = kwargs.pop("audio" , None )
        sampling_rate = kwargs.pop("sampling_rate" , None )
        text = kwargs.pop("text" , None )
        if len(args ) > 0:
            audio = args[0]
            args = args[1:]
        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process." )
        if audio is not None:
            inputs = self.feature_extractor(audio , *args , sampling_rate=sampling_rate , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs["""labels"""] = encodings["""input_ids"""]
            return inputs
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def pad( self , *args , **kwargs ):
        """simple docstring"""
        if self._in_target_context_manager:
            return self.current_processor.pad(*args , **kwargs )
        input_features = kwargs.pop("input_features" , None )
        labels = kwargs.pop("labels" , None )
        if len(args ) > 0:
            input_features = args[0]
            args = args[1:]
        if input_features is not None:
            input_features = self.feature_extractor.pad(input_features , *args , **kwargs )
        if labels is not None:
            labels = self.tokenizer.pad(labels , **kwargs )
        if labels is None:
            return input_features
        elif input_features is None:
            return labels
        else:
            input_features["""labels"""] = labels["""input_ids"""]
            return input_features
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @contextmanager
    def as_target_processor( self ):
        """simple docstring"""
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your audio inputs, or in a separate call." )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False
| 18
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[int] = logging.get_logger(__name__)
a : List[Any] = {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/config.json",
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class a ( lowercase__ ):
"""simple docstring"""
a : List[Any] = 'xglm'
a : str = ['past_key_values']
a : Any = {
'num_attention_heads': 'attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'num_layers',
}
    def __init__( self , vocab_size=256008 , max_position_embeddings=2048 , d_model=1024 , ffn_dim=4096 , num_layers=24 , attention_heads=16 , activation_function="gelu" , dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , layerdrop=0.0 , init_std=0.02 , scale_embedding=True , use_cache=True , decoder_start_token_id=2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , **kwargs , ) -> Optional[int]:
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.ffn_dim = ffn_dim
        self.num_layers = num_layers
        self.attention_heads = attention_heads
        self.activation_function = activation_function
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.layerdrop = layerdrop
        self.init_std = init_std
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , decoder_start_token_id=decoder_start_token_id , **kwargs , )
| 114
| 0
|
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class lowerCAmelCase__ :
"""simple docstring"""
lowerCAmelCase__ = None
lowerCAmelCase__ = None
lowerCAmelCase__ = None # sigma(t_i)
@classmethod
def UpperCAmelCase__ ( cls : Dict ) -> List[Any]:
"""simple docstring"""
return cls()
@dataclass
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
class lowerCAmelCase__ ( a , a ):
"""simple docstring"""
@property
def UpperCAmelCase__ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
return True
@register_to_config
    def __init__( self , sigma_min: float = 0.02 , sigma_max: float = 100 , s_noise: float = 1.007 , s_churn: float = 80 , s_min: float = 0.05 , s_max: float = 50 , ) -> None:
        """simple docstring"""
        pass
def UpperCAmelCase__ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return KarrasVeSchedulerState.create()
    def UpperCAmelCase__ ( self , state: KarrasVeSchedulerState , num_inference_steps: int , shape: Tuple = () ) -> KarrasVeSchedulerState:
        """simple docstring"""
        timesteps = jnp.arange(0 , num_inference_steps )[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps , schedule=jnp.array(schedule , dtype=jnp.float32 ) , timesteps=timesteps , )
    def UpperCAmelCase__ ( self , state: KarrasVeSchedulerState , sample: jnp.ndarray , sigma: float , key: random.KeyArray , ) -> Tuple[jnp.ndarray, float]:
        """simple docstring"""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps , 2**0.5 - 1 )
        else:
            gamma = 0
        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key , num=1 )
        eps = self.config.s_noise * random.normal(key=key , shape=sample.shape )
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat
    def UpperCAmelCase__ ( self , state: KarrasVeSchedulerState , model_output: jnp.ndarray , sigma_hat: float , sigma_prev: float , sample_hat: jnp.ndarray , return_dict: bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """simple docstring"""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
    def UpperCAmelCase__ ( self , state: KarrasVeSchedulerState , model_output: jnp.ndarray , sigma_hat: float , sigma_prev: float , sample_hat: jnp.ndarray , sample_prev: jnp.ndarray , derivative: jnp.ndarray , return_dict: bool = True , ) -> Union[FlaxKarrasVeOutput, Tuple]:
        """simple docstring"""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)
        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev , derivative=derivative , state=state )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : KarrasVeSchedulerState , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ) -> Optional[int]:
"""simple docstring"""
raise NotImplementedError()
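# A standalone numpy sketch of the sampler step implemented above (Karras et al.
# 2022, Algorithm 2): noise the sample up to sigma_hat, take an Euler step, then
# apply the second-order (Heun) correction when sigma_prev > 0. `denoise` stands
# in for the trained model and the helper name is hypothetical.
import numpy as np

def _karras_step_sketch(denoise , sample , sigma , sigma_prev , gamma , rng ):
    sigma_hat = sigma + gamma * sigma  # gamma > 0 only while s_min <= sigma <= s_max
    sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5) * rng.standard_normal(sample.shape )
    # Euler step
    model_output = denoise(sample_hat , sigma_hat )
    derivative = (sample_hat - (sample_hat + sigma_hat * model_output)) / sigma_hat  # == -model_output
    sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative
    if sigma_prev != 0:
        # Heun correction: average the slopes at sigma_hat and sigma_prev
        model_output = denoise(sample_prev , sigma_prev )
        derivative_corr = (sample_prev - (sample_prev + sigma_prev * model_output)) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * 0.5 * (derivative + derivative_corr )
    return sample_prev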
| 331
|
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
UpperCAmelCase : List[Any] = logging.get_logger(__name__)
# TODO: upload to AWS
UpperCAmelCase : Optional[int] = {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'
),
}
class lowerCAmelCase__ ( a ):
"""simple docstring"""
lowerCAmelCase__ = "retribert"
    def __init__( self , vocab_size=30_522 , hidden_size=768 , num_hidden_layers=8 , num_attention_heads=12 , intermediate_size=3_072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , share_encoders=True , projection_dim=128 , pad_token_id=0 , **kwargs , ) -> Any:
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 331
| 1
|
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
_UpperCAmelCase : Union[str, Any] = [
"""cross_validation.py""",
"""gradient_accumulation.py""",
"""local_sgd.py""",
"""multi_process_metrics.py""",
"""memory.py""",
"""automatic_gradient_accumulation.py""",
"""fsdp_with_peak_mem_tracking.py""",
"""deepspeed_with_config_support.py""",
"""megatron_lm_gpt_pretraining.py""",
]
class lowercase ( unittest.TestCase ):
def a ( self , snake_case , snake_case , snake_case = None , snake_case = None ):
snake_case_ = None
snake_case_ = os.path.abspath(os.path.join('examples' , 'by_feature' ) )
snake_case_ = os.path.abspath('examples' )
for item in os.listdir(snake_case ):
if item not in EXCLUDE_EXAMPLES:
snake_case_ = os.path.join(snake_case , snake_case )
if os.path.isfile(snake_case ) and ".py" in item_path:
with self.subTest(
tested_script=snake_case , feature_script=snake_case , tested_section='main()' if parser_only else 'training_function()' , ):
snake_case_ = compare_against_test(
os.path.join(snake_case , snake_case ) , snake_case , snake_case , snake_case )
snake_case_ = '\n'.join(snake_case )
if special_strings is not None:
for string in special_strings:
snake_case_ = diff.replace(snake_case , '' )
self.assertEqual(snake_case , '' )
    def a ( self ):
        self.one_complete_example('complete_nlp_example.py' , True )
        self.one_complete_example('complete_nlp_example.py' , False )
    def a ( self ):
        cv_path = os.path.abspath(os.path.join('examples' , 'cv_example.py' ) )
        special_strings = [
            ' ' * 16 + '{\n\n',
            ' ' * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            ' ' * 20 + '"f1": eval_metric["f1"],\n\n',
            ' ' * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            ' ' * 20 + '"epoch": epoch,\n\n',
            ' ' * 16 + '},\n\n',
            ' ' * 16 + 'step=epoch,\n',
            ' ' * 12,
            ' ' * 8 + 'for step, batch in enumerate(active_dataloader):\n',
        ]
        self.one_complete_example('complete_cv_example.py' , True , cv_path , special_strings )
        self.one_complete_example('complete_cv_example.py' , False , cv_path , special_strings )
@mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} )
class lowercase ( lowercase_ ):
__SCREAMING_SNAKE_CASE : Optional[int] = False
@classmethod
def a ( cls ):
super().setUpClass()
snake_case_ = tempfile.mkdtemp()
snake_case_ = os.path.join(cls._tmpdir , 'default_config.yml' )
write_basic_config(save_location=cls.configPath )
snake_case_ = ['accelerate', 'launch', '--config_file', cls.configPath]
@classmethod
def a ( cls ):
super().tearDownClass()
shutil.rmtree(cls._tmpdir )
def a ( self ):
snake_case_ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps epoch
--output_dir {self.tmpdir}
'''.split()
run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'epoch_0' ) ) )
def a ( self ):
snake_case_ = F'''
examples/by_feature/checkpointing.py
--checkpointing_steps 1
--output_dir {self.tmpdir}
'''.split()
snake_case_ = run_command(self._launch_args + testargs )
self.assertTrue(os.path.exists(os.path.join(self.tmpdir , 'step_2' ) ) )
def a ( self ):
snake_case_ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "epoch_0" )}
'''.split()
snake_case_ = run_command(self._launch_args + testargs , return_stdout=snake_case )
self.assertNotIn('epoch 0:' , snake_case )
self.assertIn('epoch 1:' , snake_case )
def a ( self ):
snake_case_ = F'''
examples/by_feature/checkpointing.py
--resume_from_checkpoint {os.path.join(self.tmpdir , "step_2" )}
'''.split()
snake_case_ = run_command(self._launch_args + testargs , return_stdout=snake_case )
if torch.cuda.is_available():
snake_case_ = torch.cuda.device_count()
else:
snake_case_ = 1
if num_processes > 1:
self.assertNotIn('epoch 0:' , snake_case )
self.assertIn('epoch 1:' , snake_case )
else:
self.assertIn('epoch 0:' , snake_case )
self.assertIn('epoch 1:' , snake_case )
@slow
def a ( self ):
snake_case_ = '\n examples/by_feature/cross_validation.py\n --num_folds 2\n '.split()
with mock.patch.dict(os.environ , {'TESTING_MOCKED_DATALOADERS': '0'} ):
snake_case_ = run_command(self._launch_args + testargs , return_stdout=snake_case )
snake_case_ = re.findall('({.+})' , snake_case )
snake_case_ = [r for r in results if 'accuracy' in r][-1]
snake_case_ = ast.literal_eval(snake_case )
self.assertGreaterEqual(results['accuracy'] , 0.75 )
def a ( self ):
snake_case_ = ['examples/by_feature/multi_process_metrics.py']
run_command(self._launch_args + testargs )
@require_trackers
@mock.patch.dict(os.environ , {'WANDB_MODE': 'offline'} )
def a ( self ):
with tempfile.TemporaryDirectory() as tmpdir:
snake_case_ = F'''
examples/by_feature/tracking.py
--with_tracking
--project_dir {tmpdir}
'''.split()
run_command(self._launch_args + testargs )
            self.assertTrue(os.path.exists(os.path.join(tmpdir , 'tracking' ) ) )
def a ( self ):
snake_case_ = ['examples/by_feature/gradient_accumulation.py']
run_command(self._launch_args + testargs )
def a ( self ):
snake_case_ = ['examples/by_feature/local_sgd.py']
run_command(self._launch_args + testargs )
| 285
|
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowercase ( lowercase_ ):
@staticmethod
@abstractmethod
def a ( snake_case ):
raise NotImplementedError()
@abstractmethod
def a ( self ):
raise NotImplementedError()
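# A minimal concrete-subclass sketch for the command ABC above: the static hook
# registers a subparser and `run` does the work. All names below are
# hypothetical; `subparsers` is the action returned by `parser.add_subparsers()`.
class _EnvCommandSketch:
    @staticmethod
    def register_subcommand(subparsers ):
        env_parser = subparsers.add_parser("env" )
        env_parser.set_defaults(func=lambda args: _EnvCommandSketch() )
    def run(self ):
        print("environment info goes here" )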
| 285
| 1
|
'''simple docstring'''
import inspect
import unittest
from transformers import BitConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel
from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class lowerCAmelCase__ :
def __init__( self : Dict , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any]=3 , lowerCamelCase__ : Dict=32 , lowerCamelCase__ : Any=3 , lowerCamelCase__ : List[Any]=10 , lowerCamelCase__ : List[Any]=[8, 16, 32, 64] , lowerCamelCase__ : Union[str, Any]=[1, 1, 2, 1] , lowerCamelCase__ : List[str]=True , lowerCamelCase__ : Any=True , lowerCamelCase__ : Optional[Any]="relu" , lowerCamelCase__ : Tuple=3 , lowerCamelCase__ : List[str]=None , lowerCamelCase__ : Optional[int]=["stage2", "stage3", "stage4"] , lowerCamelCase__ : Optional[Any]=[2, 3, 4] , lowerCamelCase__ : Optional[int]=1 , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : Tuple = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Any = image_size
_UpperCAmelCase : str = num_channels
_UpperCAmelCase : List[Any] = embeddings_size
_UpperCAmelCase : str = hidden_sizes
_UpperCAmelCase : Dict = depths
_UpperCAmelCase : Union[str, Any] = is_training
_UpperCAmelCase : Tuple = use_labels
_UpperCAmelCase : Tuple = hidden_act
_UpperCAmelCase : Tuple = num_labels
_UpperCAmelCase : Dict = scope
_UpperCAmelCase : str = len(lowerCamelCase__ )
_UpperCAmelCase : Union[str, Any] = out_features
_UpperCAmelCase : Dict = out_indices
_UpperCAmelCase : str = num_groups
def lowerCAmelCase__ ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
return BitConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , )
def lowerCAmelCase__ ( self : Any , lowerCamelCase__ : int , lowerCamelCase__ : List[Any] , lowerCamelCase__ : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Optional[int] = BitModel(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Optional[int] = model(lowerCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.num_labels
_UpperCAmelCase : Optional[int] = BitForImageClassification(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : List[str] = model(lowerCamelCase__ , labels=lowerCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase__ ( self : Tuple , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase : int = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Tuple = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_UpperCAmelCase : int = None
_UpperCAmelCase : List[Any] = BitBackbone(config=lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
_UpperCAmelCase : Union[str, Any] = model(lowerCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase__ ( self : Dict ) ->int:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : int = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else ()
lowerCAmelCase : Dict = (
{"feature-extraction": BitModel, "image-classification": BitForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : Optional[int] = False
lowerCAmelCase : str = False
lowerCAmelCase : Any = False
lowerCAmelCase : Dict = False
lowerCAmelCase : Dict = False
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : Tuple = BitModelTester(self )
_UpperCAmelCase : int = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Optional[Any]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase__ ( self : List[str] ) ->str:
'''simple docstring'''
return
@unittest.skip(reason="Bit does not output attentions" )
def lowerCAmelCase__ ( self : Dict ) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason="Bit does not use inputs_embeds" )
def lowerCAmelCase__ ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="Bit does not support input and output embeddings" )
def lowerCAmelCase__ ( self : Tuple ) ->str:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(lowerCamelCase__ )
_UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : int = [*signature.parameters.keys()]
_UpperCAmelCase : Dict = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase__ )
def lowerCAmelCase__ ( self : int ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Tuple = model_class(config=lowerCamelCase__ )
for name, module in model.named_modules():
                if isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
self.assertTrue(
torch.all(module.weight == 1 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
self.assertTrue(
torch.all(module.bias == 0 ) , msg=F"""Parameter {name} of model {model_class} seems not properly initialized""" , )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase__ : Any , lowerCamelCase__ : int , lowerCamelCase__ : Dict ):
_UpperCAmelCase : Union[str, Any] = model_class(lowerCamelCase__ )
model.to(lowerCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Any = model(**self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_UpperCAmelCase : Optional[int] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_UpperCAmelCase : List[Any] = self.model_tester.num_stages
self.assertEqual(len(lowerCamelCase__ ) , expected_num_stages + 1 )
# Bit's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[str] = ["preactivation", "bottleneck"]
for model_class in self.all_model_classes:
for layer_type in layers_type:
_UpperCAmelCase : Optional[int] = layer_type
_UpperCAmelCase : Any = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : List[str] = True
check_hidden_states_output(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(reason="Bit does not use feedforward chunking" )
def lowerCAmelCase__ ( self : str ) ->List[Any]:
'''simple docstring'''
pass
def lowerCAmelCase__ ( self : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase__ )
@slow
def lowerCAmelCase__ ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase : List[Any] = BitModel.from_pretrained(lowerCamelCase__ )
self.assertIsNotNone(lowerCamelCase__ )
def __lowerCAmelCase ():
_UpperCAmelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase__ ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
return (
BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None
)
@slow
def lowerCAmelCase__ ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase : str = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase__ )
_UpperCAmelCase : Tuple = self.default_image_processor
_UpperCAmelCase : List[str] = prepare_img()
_UpperCAmelCase : Any = image_processor(images=lowerCamelCase__ , return_tensors="pt" ).to(lowerCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase : List[str] = model(**lowerCamelCase__ )
# verify the logits
_UpperCAmelCase : Dict = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_UpperCAmelCase : Optional[int] = torch.tensor([[-0.6_5_2_6, -0.5_2_6_3, -1.4_3_9_8]] ).to(lowerCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase__ , atol=1E-4 ) )
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , unittest.TestCase ):
lowerCAmelCase : int = (BitBackbone,) if is_torch_available() else ()
lowerCAmelCase : Any = BitConfig
lowerCAmelCase : int = False
def lowerCAmelCase__ ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : Dict = BitModelTester(self )
| 354
|
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCamelCase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : int ) ->str:
'''simple docstring'''
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Union[str, Any] = [label.strip() for label in labels.split("," ) if label.strip()]
return labels
def __call__( self : Union[str, Any] , lowerCamelCase__ : Any , lowerCamelCase__ : Any , lowerCamelCase__ : List[Any] ) ->str:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0 or len(lowerCamelCase__ ) == 0:
raise ValueError("You must include at least one label and at least one sequence." )
if hypothesis_template.format(labels[0] ) == hypothesis_template:
raise ValueError(
(
"The provided hypothesis_template \"{}\" was not able to be formatted with the target labels. "
"Make sure the passed template includes formatting syntax such as {{}} where the label should go."
).format(lowerCamelCase__ ) )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ):
_UpperCAmelCase : Optional[Any] = [sequences]
_UpperCAmelCase : int = []
for sequence in sequences:
sequence_pairs.extend([[sequence, hypothesis_template.format(lowerCamelCase__ )] for label in labels] )
return sequence_pairs, sequences
@add_end_docstrings(UpperCAmelCase__ )
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Union[str, Any] , lowerCamelCase__ : Optional[Any]=ZeroShotClassificationArgumentHandler() , *lowerCamelCase__ : List[str] , **lowerCamelCase__ : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase : Tuple = args_parser
super().__init__(*lowerCamelCase__ , **lowerCamelCase__ )
if self.entailment_id == -1:
logger.warning(
"Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to "
"-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." )
@property
def lowerCAmelCase__ ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for label, ind in self.model.config.labelaid.items():
if label.lower().startswith("entail" ):
return ind
return -1
def lowerCAmelCase__ ( self : Optional[int] , lowerCamelCase__ : Tuple , lowerCamelCase__ : int=True , lowerCamelCase__ : Optional[int]=True , lowerCamelCase__ : str=TruncationStrategy.ONLY_FIRST , **lowerCamelCase__ : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase : int = self.framework
if self.tokenizer.pad_token is None:
# Override for tokenizers not supporting padding
logger.error(
"Tokenizer was not supporting padding necessary for zero-shot, attempting to use "
" `pad_token=eos_token`" )
_UpperCAmelCase : Optional[Any] = self.tokenizer.eos_token
try:
_UpperCAmelCase : List[str] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=lowerCamelCase__ , )
except Exception as e:
if "too short" in str(lowerCamelCase__ ):
# tokenizers might yell that we want to truncate
# to a value that is not even reached by the input.
# In that case we don't want to truncate.
# It seems there's not a really better way to catch that
# exception.
_UpperCAmelCase : List[Any] = self.tokenizer(
lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , padding=lowerCamelCase__ , truncation=TruncationStrategy.DO_NOT_TRUNCATE , )
else:
raise e
return inputs
def lowerCAmelCase__ ( self : int , **lowerCamelCase__ : Union[str, Any] ) ->Tuple:
'''simple docstring'''
if kwargs.get("multi_class" , lowerCamelCase__ ) is not None:
_UpperCAmelCase : int = kwargs["multi_class"]
logger.warning(
"The `multi_class` argument has been deprecated and renamed to `multi_label`. "
"`multi_class` will be removed in a future version of Transformers." )
_UpperCAmelCase : Dict = {}
if "candidate_labels" in kwargs:
_UpperCAmelCase : List[Any] = self._args_parser._parse_labels(kwargs["candidate_labels"] )
if "hypothesis_template" in kwargs:
_UpperCAmelCase : Dict = kwargs["hypothesis_template"]
_UpperCAmelCase : List[str] = {}
if "multi_label" in kwargs:
_UpperCAmelCase : Optional[Any] = kwargs["multi_label"]
return preprocess_params, {}, postprocess_params
def __call__( self : int , lowerCamelCase__ : Union[str, List[str]] , *lowerCamelCase__ : str , **lowerCamelCase__ : Optional[Any] , ) ->Optional[int]:
'''simple docstring'''
if len(lowerCamelCase__ ) == 0:
pass
elif len(lowerCamelCase__ ) == 1 and "candidate_labels" not in kwargs:
_UpperCAmelCase : int = args[0]
else:
raise ValueError(F"""Unable to understand extra arguments {args}""" )
return super().__call__(lowerCamelCase__ , **lowerCamelCase__ )
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : Tuple , lowerCamelCase__ : Any=None , lowerCamelCase__ : str="This example is {}." ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase : Optional[int] = self._args_parser(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
for i, (candidate_label, sequence_pair) in enumerate(zip(lowerCamelCase__ , lowerCamelCase__ ) ):
_UpperCAmelCase : Optional[int] = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(lowerCamelCase__ ) - 1,
**model_input,
}
def lowerCAmelCase__ ( self : Optional[Any] , lowerCamelCase__ : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase : Dict = inputs["candidate_label"]
_UpperCAmelCase : Optional[int] = inputs["sequence"]
_UpperCAmelCase : Dict = {k: inputs[k] for k in self.tokenizer.model_input_names}
_UpperCAmelCase : List[Any] = self.model(**lowerCamelCase__ )
_UpperCAmelCase : Optional[Any] = {
"candidate_label": candidate_label,
"sequence": sequence,
"is_last": inputs["is_last"],
**outputs,
}
return model_outputs
def lowerCAmelCase__ ( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : Tuple=False ) ->Union[str, Any]:
'''simple docstring'''
        candidate_labels = [outputs["candidate_label"] for outputs in model_outputs]
        sequences = [outputs["sequence"] for outputs in model_outputs]
        logits = np.concatenate([output["logits"].numpy() for output in model_outputs] )
        N = logits.shape[0]
        n = len(candidate_labels )
        num_sequences = N // n
        reshaped_outputs = logits.reshape((num_sequences, n, -1) )
        if multi_label or len(candidate_labels ) == 1:
            # softmax over the entailment vs. contradiction dim for each label independently
            entailment_id = self.entailment_id
            contradiction_id = -1 if entailment_id == 0 else 0
            entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]]
            scores = np.exp(entail_contr_logits ) / np.exp(entail_contr_logits ).sum(-1 , keepdims=True )
            scores = scores[..., 1]
        else:
            # softmax the "entailment" logits over all candidate labels
            entail_logits = reshaped_outputs[..., self.entailment_id]
            scores = np.exp(entail_logits ) / np.exp(entail_logits ).sum(-1 , keepdims=True )
        top_inds = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
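# A standalone sketch of the two scoring modes in the postprocessing above: with
# multi_label each candidate is scored independently by softmaxing its own
# contradiction-vs-entailment logits, otherwise the entailment logits are
# softmaxed across all candidates. `nli_logits` has shape (num_labels, 3) and
# the helper name is hypothetical; `np` is the numpy import above.
def _zero_shot_scores_sketch(nli_logits , entailment_id=2 , contradiction_id=0 , multi_label=False ):
    if multi_label:
        pair = nli_logits[:, [contradiction_id, entailment_id]]
        probs = np.exp(pair ) / np.exp(pair ).sum(-1 , keepdims=True )
        return probs[:, 1]  # independent per-label probabilities, need not sum to 1
    entail = nli_logits[:, entailment_id]
    return np.exp(entail ) / np.exp(entail ).sum()  # sums to 1 across labels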
| 322
| 0
|
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_0000)]
def next_number( number ):
    sum_of_digits_squared = 0
    while number:
        # Increased speed slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 10_0000]
        number //= 10_0000
    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 1000_0000
CHAINS[0] = True  # the chain starting at 1 ends in 1
CHAINS[57] = False  # the chain starting at 58 ends in 89
def chain( number ):
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore
    number_chain = chain(next_number(number ) )
    CHAINS[number - 1] = number_chain
    while number < 1000_0000:
        CHAINS[number - 1] = number_chain
        number *= 10
    return number_chain
def solution( number = 1000_0000 ):
    for i in range(1, number ):
        if CHAINS[i] is None:
            chain(i + 1 )
    return CHAINS[:number].count(False )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution() = }''')
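# Worked example for the 5-digit chunking above: next_number(1234567) is computed
# as DIGITS_SQUARED[34567] + DIGITS_SQUARED[12] == 135 + 5 == 140, the same as
# summing the squared digits one at a time (1+4+9+16+25+36+49).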
| 287
|
def lucas_lehmer_test( p ):
    if p < 2:
        raise ValueError("""p should not be less than 2!""" )
    elif p == 2:
        return True
    s = 4
    m = (1 << p) - 1
    for _ in range(p - 2 ):
        s = ((s * s) - 2) % m
    return s == 0
if __name__ == "__main__":
print(lucas_lehmer_test(7))
print(lucas_lehmer_test(1_1))
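# The test above decides whether the Mersenne number 2**p - 1 is prime for an odd
# prime p: s_0 = 4, s_{i+1} = (s_i**2 - 2) mod (2**p - 1), and 2**p - 1 is prime
# iff s_{p-2} == 0. Sanity check against known Mersenne prime exponents:
# [p for p in (3, 5, 7, 11, 13) if lucas_lehmer_test(p)] evaluates to [3, 5, 7, 13]
# (2**11 - 1 == 2047 == 23 * 89 is composite).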
| 287
| 1
|
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class snake_case__ :
"""simple docstring"""
def __init__( self , __lowercase , __lowercase=2 , __lowercase=3 , __lowercase=4 , __lowercase=2 , __lowercase=7 , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=True , __lowercase=9_9 , __lowercase=3_6 , __lowercase=3 , __lowercase=4 , __lowercase=3_7 , __lowercase="gelu" , __lowercase=0.1 , __lowercase=0.1 , __lowercase=5_1_2 , __lowercase=1_6 , __lowercase=2 , __lowercase=0.0_2 , __lowercase=6 , __lowercase=6 , __lowercase=3 , __lowercase=4 , __lowercase=None , __lowercase=1_0_0_0 , ) -> Dict:
"""simple docstring"""
a__ : Optional[Any] = parent
a__ : Optional[int] = batch_size
a__ : List[str] = num_channels
a__ : Optional[Any] = image_size
a__ : int = patch_size
a__ : int = text_seq_length
a__ : Union[str, Any] = is_training
a__ : Any = use_input_mask
a__ : Optional[int] = use_token_type_ids
a__ : str = use_labels
a__ : Union[str, Any] = vocab_size
a__ : List[str] = hidden_size
a__ : int = num_hidden_layers
a__ : Optional[int] = num_attention_heads
a__ : Optional[Any] = intermediate_size
a__ : List[str] = hidden_act
a__ : str = hidden_dropout_prob
a__ : str = attention_probs_dropout_prob
a__ : Union[str, Any] = max_position_embeddings
a__ : List[str] = type_vocab_size
a__ : str = type_sequence_label_size
a__ : Optional[int] = initializer_range
a__ : int = coordinate_size
a__ : str = shape_size
a__ : Optional[Any] = num_labels
a__ : List[str] = num_choices
a__ : Optional[Any] = scope
a__ : str = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
a__ : Optional[int] = text_seq_length
a__ : Dict = (image_size // patch_size) ** 2 + 1
a__ : str = self.text_seq_length + self.image_seq_length
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Any = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
a__ : Union[str, Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
a__ : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a__ : List[str] = None
if self.use_input_mask:
a__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
a__ : List[str] = None
if self.use_token_type_ids:
a__ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
a__ : int = None
a__ : Tuple = None
if self.use_labels:
a__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a__ : Optional[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
a__ : List[str] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
a__ : str = LayoutLMvaModel(config=__lowercase )
model.to(__lowercase )
model.eval()
# text + image
a__ : List[str] = model(__lowercase , pixel_values=__lowercase )
a__ : List[str] = model(
__lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase )
a__ : Dict = model(__lowercase , bbox=__lowercase , pixel_values=__lowercase , token_type_ids=__lowercase )
a__ : Union[str, Any] = model(__lowercase , bbox=__lowercase , pixel_values=__lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
a__ : Optional[int] = model(__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
a__ : Any = model(pixel_values=__lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
a__ : Union[str, Any] = self.num_labels
a__ : Optional[int] = LayoutLMvaForSequenceClassification(__lowercase )
model.to(__lowercase )
model.eval()
a__ : Any = model(
__lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Tuple:
"""simple docstring"""
a__ : List[Any] = self.num_labels
a__ : List[Any] = LayoutLMvaForTokenClassification(config=__lowercase )
model.to(__lowercase )
model.eval()
a__ : List[Any] = model(
__lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , labels=__lowercase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> int:
"""simple docstring"""
a__ : List[Any] = LayoutLMvaForQuestionAnswering(config=__lowercase )
model.to(__lowercase )
model.eval()
a__ : Dict = model(
__lowercase , bbox=__lowercase , pixel_values=__lowercase , attention_mask=__lowercase , token_type_ids=__lowercase , start_positions=__lowercase , end_positions=__lowercase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : str = self.prepare_config_and_inputs()
(
config,
input_ids,
bbox,
pixel_values,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
) = config_and_inputs
a__ : List[str] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class snake_case__ (A__ , A__ , unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase :List[Any] = False
__lowerCAmelCase :Dict = False
__lowerCAmelCase :Any = False
__lowerCAmelCase :Tuple = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__lowerCAmelCase :str = (
{"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
return True
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Union[str, Any] = LayoutLMvaModelTester(self )
a__ : List[Any] = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase , __lowercase=False ) -> List[str]:
"""simple docstring"""
a__ : Optional[int] = copy.deepcopy(__lowercase )
if model_class in get_values(__lowercase ):
a__ : Union[str, Any] = {
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__lowercase , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__lowercase ):
a__ : Any = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
elif model_class in get_values(__lowercase ):
a__ : Optional[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
a__ : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
elif model_class in [
*get_values(__lowercase ),
]:
a__ : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowercase )
elif model_class in [
*get_values(__lowercase ),
]:
a__ : Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__lowercase , )
return inputs_dict
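# (In the upstream common-test suite, the get_values(...) branches above dispatch
# on the task-specific model mappings: multiple-choice inputs are tiled along a
# num_choices axis, classification gets one label per example, question answering
# gets start/end positions, and token classification gets per-token labels.)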
def SCREAMING_SNAKE_CASE__( self ) -> List[str]:
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
a__ : Optional[int] = type
self.model_tester.create_and_check_model(*__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Tuple:
"""simple docstring"""
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> Optional[int]:
"""simple docstring"""
a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowercase )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[int] = LayoutLMvaModel.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
a__ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
return LayoutLMvaImageProcessor(apply_ocr=__lowercase ) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE__( self ) -> int:
"""simple docstring"""
a__ : Any = LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__lowercase )
a__ : List[Any] = self.default_image_processor
a__ : Union[str, Any] = prepare_img()
a__ : Any = image_processor(images=__lowercase , return_tensors="""pt""" ).pixel_values.to(__lowercase )
a__ : Dict = torch.tensor([[1, 2]] )
a__ : Any = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
a__ : List[Any] = model(
input_ids=input_ids.to(__lowercase ) , bbox=bbox.to(__lowercase ) , pixel_values=pixel_values.to(__lowercase ) , )
# verify the logits
a__ : List[Any] = torch.Size((1, 1_9_9, 7_6_8) )
self.assertEqual(outputs.last_hidden_state.shape , __lowercase )
a__ : int = torch.tensor(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] ).to(__lowercase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __lowercase , atol=1E-4 ) )
| 363
|
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def lowerCAmelCase_ ( _lowercase : List[str]) -> Union[str, Any]:
"""simple docstring"""
a__ : List[str] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_lowercase , _lowercase)
def lowerCAmelCase_ ( _lowercase : List[Any]) -> Optional[Any]:
"""simple docstring"""
a__ , a__ : Union[str, Any] = emb.weight.shape
a__ : str = nn.Linear(_lowercase , _lowercase , bias=_lowercase)
a__ : Any = emb.weight.data
return lin_layer
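# (e.g. an embedding with weight shape (vocab_size, emb_size) becomes a bias-free
# linear layer that reuses the same weight tensor; a standard weight-tying helper)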
def lowerCAmelCase_ ( _lowercase : int , _lowercase : int=None) -> List[Any]:
"""simple docstring"""
a__ : List[str] = {}
for old_key in state_dict.keys():
a__ : Any = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
a__ : Dict = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''')
else:
a__ : Optional[int] = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""")
if "gate" in key:
a__ : Tuple = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""")
if "fc2" and "experts" not in key:
a__ : Optional[int] = key.replace(""".fc2.""" , """.ffn.fc2.""")
if "fc1" and "experts" not in key:
a__ : Optional[int] = key.replace(""".fc1.""" , """.ffn.fc1.""")
if ".encoder_attn." in key:
a__ : Tuple = key.replace(""".encoder_attn.""" , """.cross_attention.""")
if "encoder_attn_layer_norm" in key:
a__ : Optional[Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""")
if "final_layer_norm" in key:
a__ : List[str] = key.replace("""final_layer_norm""" , """ff_layer_norm""")
a__ : str = state_dict[old_key]
return new_dict
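# Illustration (not part of the original script; sample keys are invented): how
# the renaming above maps a few representative fairseq keys when expert_idx=3.
#   "decoder.layers.0.moe_layer.experts.0.fc1.weight" -> "decoder.layers.0.ffn.experts.expert_3.fc1.weight"
#   "decoder.layers.0.moe_layer.gate.wg.weight" -> "decoder.layers.0.ffn.router.classifier.weight"
#   "decoder.layers.0.encoder_attn.out_proj.weight" -> "decoder.layers.0.cross_attention.out_proj.weight"
#   "decoder.layers.0.final_layer_norm.weight" -> "decoder.layers.0.ff_layer_norm.weight"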
def lowerCAmelCase_ ( _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Dict , _lowercase : str = WEIGHTS_NAME) -> Tuple:
"""simple docstring"""
a__ : Tuple = []
a__ : Optional[Any] = 0
os.makedirs(_lowercase , exist_ok=_lowercase)
for expert in range(_lowercase):
a__ : str = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(_lowercase):
a__ : List[str] = torch.load(_lowercase)["""model"""]
remove_ignore_keys_(_lowercase)
a__ : Tuple = rename_fairseq_keys(_lowercase , _lowercase)
a__ : str = os.path.join(
_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase)+1:05d}-of-???.bin'''))
torch.save(_lowercase , _lowercase)
sharded_state_dicts.append(expert_state.keys())
total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
expert_state[list(_lowercase)[0]].dtype)
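# (e.g. a float32 tensor with 1_000 elements adds 1_000 * 4 = 4_000 bytes here,
# since dtype_byte_size(torch.float32) is 4)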
# Add the last block
a__ : int = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase)+1:05d}-of-???.bin'''))
a__ : Union[str, Any] = torch.load(switch_checkpoint_path + """-shared.pt""")["""model"""]
remove_ignore_keys_(_lowercase)
a__ : List[str] = rename_fairseq_keys(_lowercase , _lowercase)
a__ : int = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys())
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_lowercase) == 1:
a__ : Optional[int] = os.path.join(_lowercase , _lowercase)
torch.save(_lowercase , _lowercase)
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_lowercase , _lowercase)
# Otherwise, let's build the index
a__ : List[str] = {}
for idx, shard in enumerate(_lowercase):
a__ : Union[str, Any] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase):05d}.bin''')
a__ : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin'''))
os.rename(_lowercase , os.path.join(_lowercase , _lowercase))
for key in shard:
a__ : Tuple = shard_file
# Add the metadata
a__ : Tuple = {"""total_size""": total_size}
a__ : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_lowercase , _lowercase) , """w""" , encoding="""utf-8""") as f:
a__ : Dict = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase) + """\n"""
f.write(_lowercase)
return metadata, index
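# Sketch of the index built above (key names, file names and sizes are
# illustrative):
# {
#   "metadata": {"total_size": 4000},
#   "weight_map": {
#     "ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00002.bin",
#     "decoder.embed_tokens.weight": "pytorch_model-00002-of-00002.bin"
#   }
# }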
if __name__ == "__main__":
_lowercase : Any =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
_lowercase : Tuple =parser.parse_args()
_lowercase , _lowercase : List[Any] =shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
_lowercase : int =NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
_lowercase : List[str] =NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 266
| 0
|
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( lowerCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = AudioLDMPipeline
UpperCamelCase_ : Tuple = TEXT_TO_AUDIO_PARAMS
UpperCamelCase_ : str = TEXT_TO_AUDIO_BATCH_PARAMS
UpperCamelCase_ : List[str] = frozenset(
[
'''num_inference_steps''',
'''num_waveforms_per_prompt''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : int = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=(3_2, 6_4) , class_embed_type="simple_projection" , projection_class_embeddings_input_dim=3_2 , class_embeddings_concat=__UpperCAmelCase , )
_UpperCAmelCase : Dict = DDIMScheduler(
beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , )
torch.manual_seed(0 )
_UpperCAmelCase : List[str] = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=1 , out_channels=1 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , projection_dim=3_2 , )
_UpperCAmelCase : Optional[Any] = ClapTextModelWithProjection(__UpperCAmelCase )
_UpperCAmelCase : Optional[Any] = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta" , model_max_length=7_7 )
_UpperCAmelCase : List[str] = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=1_6_0_0_0 , upsample_initial_channel=1_6 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__UpperCAmelCase , )
_UpperCAmelCase : Optional[Any] = SpeechTaHifiGan(__UpperCAmelCase )
_UpperCAmelCase : List[Any] = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"vocoder": vocoder,
}
return components
def _lowerCAmelCase ( self : Optional[Any] , lowerCAmelCase__ : str , lowerCAmelCase__ : int=0 ) -> List[str]:
"""simple docstring"""
if str(__UpperCAmelCase ).startswith("mps" ):
_UpperCAmelCase : Union[str, Any] = torch.manual_seed(__UpperCAmelCase )
else:
_UpperCAmelCase : Union[str, Any] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_UpperCAmelCase : Tuple = {
"prompt": "A hammer hitting a wooden surface",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
}
return inputs
def _lowerCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : Optional[Any] = AudioLDMPipeline(**__UpperCAmelCase )
_UpperCAmelCase : Optional[int] = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : Dict = self.get_dummy_inputs(__UpperCAmelCase )
_UpperCAmelCase : int = audioldm_pipe(**__UpperCAmelCase )
_UpperCAmelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 2_5_6
_UpperCAmelCase : List[str] = audio[:1_0]
_UpperCAmelCase : List[Any] = np.array(
[-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : int ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : Dict = AudioLDMPipeline(**__UpperCAmelCase )
_UpperCAmelCase : Tuple = audioldm_pipe.to(__UpperCAmelCase )
_UpperCAmelCase : str = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : Tuple = self.get_dummy_inputs(__UpperCAmelCase )
_UpperCAmelCase : Optional[int] = 3 * [inputs["prompt"]]
# forward
_UpperCAmelCase : Optional[Any] = audioldm_pipe(**__UpperCAmelCase )
audio_1 = output.audios[0]
_UpperCAmelCase : str = self.get_dummy_inputs(__UpperCAmelCase )
_UpperCAmelCase : Tuple = 3 * [inputs.pop("prompt" )]
_UpperCAmelCase : Any = audioldm_pipe.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors="pt" , )
_UpperCAmelCase : List[Any] = text_inputs["input_ids"].to(__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = audioldm_pipe.text_encoder(
__UpperCAmelCase , )
_UpperCAmelCase : Optional[Any] = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCAmelCase : Optional[int] = F.normalize(__UpperCAmelCase , dim=-1 )
_UpperCAmelCase : Dict = prompt_embeds
# forward
_UpperCAmelCase : Optional[int] = audioldm_pipe(**__UpperCAmelCase )
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def _lowerCAmelCase ( self : Any ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.get_dummy_components()
_UpperCAmelCase : Tuple = AudioLDMPipeline(**__UpperCAmelCase )
_UpperCAmelCase : List[Any] = audioldm_pipe.to(__UpperCAmelCase )
_UpperCAmelCase : Any = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : Optional[Any] = self.get_dummy_inputs(__UpperCAmelCase )
_UpperCAmelCase : str = 3 * ["this is a negative prompt"]
_UpperCAmelCase : Dict = negative_prompt
_UpperCAmelCase : Optional[Any] = 3 * [inputs["prompt"]]
# forward
_UpperCAmelCase : Tuple = audioldm_pipe(**__UpperCAmelCase )
audio_1 = output.audios[0]
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = 3 * [inputs.pop("prompt" )]
_UpperCAmelCase : Tuple = []
for p in [prompt, negative_prompt]:
_UpperCAmelCase : Any = audioldm_pipe.tokenizer(
__UpperCAmelCase , padding="max_length" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__UpperCAmelCase , return_tensors="pt" , )
_UpperCAmelCase : Union[str, Any] = text_inputs["input_ids"].to(__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = audioldm_pipe.text_encoder(
__UpperCAmelCase , )
_UpperCAmelCase : Union[str, Any] = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
_UpperCAmelCase : Optional[Any] = F.normalize(__UpperCAmelCase , dim=-1 )
embeds.append(__UpperCAmelCase )
prompt_embeds , negative_prompt_embeds = embeds
# forward
_UpperCAmelCase : Any = audioldm_pipe(**__UpperCAmelCase )
audio_2 = output.audios[0]
assert np.abs(audio_1 - audio_2 ).max() < 1e-2
def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : Dict = self.get_dummy_components()
_UpperCAmelCase : Optional[Any] = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
_UpperCAmelCase : Tuple = AudioLDMPipeline(**__UpperCAmelCase )
_UpperCAmelCase : str = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : int = self.get_dummy_inputs(__UpperCAmelCase )
_UpperCAmelCase : Tuple = "egg cracking"
_UpperCAmelCase : int = audioldm_pipe(**__UpperCAmelCase , negative_prompt=__UpperCAmelCase )
_UpperCAmelCase : Any = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 2_5_6
_UpperCAmelCase : List[Any] = audio[:1_0]
_UpperCAmelCase : str = np.array(
[-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : List[Any] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : str = self.get_dummy_components()
_UpperCAmelCase : Dict = PNDMScheduler(skip_prk_steps=__UpperCAmelCase )
_UpperCAmelCase : Any = AudioLDMPipeline(**__UpperCAmelCase )
_UpperCAmelCase : Tuple = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : List[Any] = "A hammer hitting a wooden surface"
# test num_waveforms_per_prompt=1 (default)
_UpperCAmelCase : Dict = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 ).audios
assert audios.shape == (1, 2_5_6)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
_UpperCAmelCase : Any = 2
_UpperCAmelCase : Optional[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 2_5_6)
# test num_waveforms_per_prompt for single prompt
_UpperCAmelCase : Tuple = 2
_UpperCAmelCase : List[Any] = audioldm_pipe(__UpperCAmelCase , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios
assert audios.shape == (num_waveforms_per_prompt, 2_5_6)
# test num_waveforms_per_prompt for batch of prompts
_UpperCAmelCase : Tuple = 2
_UpperCAmelCase : Dict = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__UpperCAmelCase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_5_6)
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : str = "cpu" # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase : int = self.get_dummy_components()
_UpperCAmelCase : Tuple = AudioLDMPipeline(**__UpperCAmelCase )
_UpperCAmelCase : Optional[int] = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : str = audioldm_pipe.vocoder.config.sampling_rate
_UpperCAmelCase : Optional[int] = self.get_dummy_inputs(__UpperCAmelCase )
_UpperCAmelCase : str = audioldm_pipe(audio_length_in_s=0.016 , **__UpperCAmelCase )
_UpperCAmelCase : Optional[int] = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.016
_UpperCAmelCase : Dict = audioldm_pipe(audio_length_in_s=0.032 , **__UpperCAmelCase )
_UpperCAmelCase : int = output.audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) / vocoder_sampling_rate == 0.032
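# (With the dummy vocoder's 16_000 Hz sampling rate, 0.016 s corresponds to 256
# output samples and 0.032 s to 512, which is what the two checks above verify.)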
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
_UpperCAmelCase : List[Any] = AudioLDMPipeline(**__UpperCAmelCase )
_UpperCAmelCase : Tuple = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : Tuple = ["hey"]
_UpperCAmelCase : int = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 )
_UpperCAmelCase : List[str] = output.audios.shape
assert audio_shape == (1, 2_5_6)
_UpperCAmelCase : List[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
_UpperCAmelCase : Dict = SpeechTaHifiGan(__UpperCAmelCase ).to(__UpperCAmelCase )
_UpperCAmelCase : str = audioldm_pipe(__UpperCAmelCase , num_inference_steps=1 )
_UpperCAmelCase : List[str] = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 2_5_6)
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__UpperCAmelCase )
def _lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
self._test_inference_batch_single_identical(test_mean_pixel_difference=__UpperCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__UpperCAmelCase )
@slow
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Tuple , lowerCAmelCase__ : Any , lowerCAmelCase__ : Any="cpu" , lowerCAmelCase__ : Union[str, Any]=torch.floataa , lowerCAmelCase__ : List[Any]=0 ) -> int:
"""simple docstring"""
_UpperCAmelCase : List[str] = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
_UpperCAmelCase : List[Any] = np.random.RandomState(__UpperCAmelCase ).standard_normal((1, 8, 1_2_8, 1_6) )
_UpperCAmelCase : Optional[Any] = torch.from_numpy(__UpperCAmelCase ).to(device=__UpperCAmelCase , dtype=__UpperCAmelCase )
_UpperCAmelCase : Union[str, Any] = {
"prompt": "A hammer hitting a wooden surface",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 2.5,
}
return inputs
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Optional[int] = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
_UpperCAmelCase : int = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : List[str] = self.get_inputs(__UpperCAmelCase )
_UpperCAmelCase : List[str] = 2_5
_UpperCAmelCase : Union[str, Any] = audioldm_pipe(**__UpperCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 8_1_9_2_0
_UpperCAmelCase : List[str] = audio[7_7_2_3_0:7_7_2_4_0]
_UpperCAmelCase : Dict = np.array(
[-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315] )
_UpperCAmelCase : int = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Any = AudioLDMPipeline.from_pretrained("cvssp/audioldm" )
_UpperCAmelCase : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
_UpperCAmelCase : int = audioldm_pipe.to(__UpperCAmelCase )
audioldm_pipe.set_progress_bar_config(disable=__UpperCAmelCase )
_UpperCAmelCase : Optional[int] = self.get_inputs(__UpperCAmelCase )
_UpperCAmelCase : List[Any] = audioldm_pipe(**__UpperCAmelCase ).audios[0]
assert audio.ndim == 1
assert len(__UpperCAmelCase ) == 8_1_9_2_0
_UpperCAmelCase : str = audio[2_7_7_8_0:2_7_7_9_0]
_UpperCAmelCase : Union[str, Any] = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212] )
_UpperCAmelCase : str = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 145
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_mmbt""": ["""MMBTConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""MMBTForClassification""", """MMBTModel""", """ModalEmbeddings"""]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
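# (At runtime _LazyModule defers all imports until first attribute access; the
# is_torch_available() checks gate the modeling exports in both branches.)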
| 330
| 0
|
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( _lowercase : int ) ->list[int]:
'''simple docstring'''
if length <= 0 or not isinstance(length , int ):
raise ValueError("Length must be a positive integer." )
return [n * (2 * n - 1) for n in range(_lowercase )]
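# Example: hexagonal_numbers(5) returns [0, 1, 6, 15, 28], i.e. n * (2 * n - 1) for n in 0..4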
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 79
|
"""simple docstring"""
from __future__ import annotations
from math import pow, sqrt
def _SCREAMING_SNAKE_CASE ( _lowercase : float , _lowercase : float , _lowercase : float ) ->dict[str, float]:
'''simple docstring'''
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(_lowercase , 2 ) - pow(_lowercase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(_lowercase , 2 ) - pow(_lowercase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(_lowercase , 2 ) + pow(_lowercase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 79
| 1
|