| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 86–54.5k | int64 0–371 | stringlengths 87–49.2k | int64 0–349 | int64 0–1 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin


class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(
        self,
        images=None,
        audio=None,
        images_mixed=None,
        sampling_rate=None,
        mask_audio=False,
        mask_pixel=False,
        *args,
        **kwargs,
    ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")

        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs
            )

        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
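
# Illustrative usage sketch (added for this dump; not part of the original
# transformers source). It assumes an installed transformers version that still
# ships the TVLT classes; the zero-filled audio below is a placeholder input.
import numpy as np
from transformers import TvltFeatureExtractor, TvltImageProcessor, TvltProcessor

tvlt_processor = TvltProcessor(
    image_processor=TvltImageProcessor(), feature_extractor=TvltFeatureExtractor()
)
dummy_audio = [np.zeros(44_100, dtype=np.float32)]  # one second of silence
tvlt_outputs = tvlt_processor(audio=dummy_audio, sampling_rate=44_100)
print(sorted(tvlt_outputs.keys()))  # feature-extractor outputs, e.g. audio_values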
| code_codestyle: 318 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor


@require_vision
class ChineseCLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "的",
            "价",
            "格",
            "是",
            "15",
            "便",
            "alex",
            "##andra",
            ",",
            "。",
            "-",
            "t",
            "shirt",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        image_processor_map = {
            "do_resize": True,
            "size": {"height": 224, "width": 224},
            "do_center_crop": True,
            "crop_size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
            "do_convert_rgb": True,
        }
        self.image_processor_file = os.path.join(self.tmpdirname, FEATURE_EXTRACTOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return BertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        # Prepares a list of small random PIL images.
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = ChineseCLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = ChineseCLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = ChineseCLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = ChineseCLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, BertTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, BertTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ChineseCLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ChineseCLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = ChineseCLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(cls_token="(CLS)", sep_token="(SEP)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname, cls_token="(CLS)", sep_token="(SEP)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, BertTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ChineseCLIPImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"

        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "token_type_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = ChineseCLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "Alexandra,T-shirt的价格是15便士。"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| style_context_codestyle: 318 | label: 1 |
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    # Numerators of the continued-fraction convergents of e, whose partial
    # quotients are [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)


if __name__ == "__main__":
    print(f"{solution() = }")
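
# Worked check (added; not part of the original solution file): the tenth
# convergent of e is 1457/536 and 1 + 4 + 5 + 7 = 17, so:
assert solution(10) == 17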
| code_codestyle: 356 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features: Features) -> "TextClassification":
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # the dataclass is frozen, so bypass __setattr__ the same way the library does
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
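
# Illustrative usage sketch (added; not part of the original module). It assumes
# a datasets release that still exposes `datasets.tasks`; the feature names
# mirror the defaults above.
from datasets import ClassLabel, Features, Value
from datasets.tasks import TextClassification

features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
task = TextClassification(text_column="text", label_column="labels")
aligned = task.align_with_features(features)
print(aligned.label_schema["labels"].names)  # ['neg', 'pos']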
| style_context_codestyle: 333 | label: 0 |
"""simple docstring"""
from __future__ import annotations

from math import gcd


def pollard_rho(
    num: int,
    seed: int = 2,
    step: int = 1,
    attempts: int = 3,
) -> int | None:
    """Returns a nontrivial factor of ``num``, or None if no factor was found."""
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because of the relationship between ``f(f(x))`` and ``f(x)``, this
    # algorithm struggles to find factors that are divisible by two.
    # As a workaround, we specifically check for two and even inputs.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    # Pollard's Rho algorithm requires a function that returns pseudorandom
    # values between 0 <= X < ``num``. It doesn't need to be random in the
    # sense that the output value is cryptographically secure or difficult
    # to calculate, it only needs to be random in the sense that all output
    # values should be equally likely to appear.
    # For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``.
    # However, the success of Pollard's algorithm isn't guaranteed and is
    # determined in part by the initial seed and the chosen random function.
    # To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
    # where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # At some point both the tortoise and the hare will enter a cycle whose
            # length ``p`` is a divisor of ``num``. Once in that cycle, at some point
            # the tortoise and hare will end up on the same value modulo ``p``.
            # We can detect when this happens because the position difference between
            # the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)

            if divisor == 1:
                # No common divisor yet, just keep searching.
                continue
            else:
                # We found a common divisor!
                if divisor == num:
                    # Unfortunately, the divisor is ``num`` itself and is useless.
                    break
                else:
                    # The divisor is a nontrivial factor of ``num``!
                    return divisor

        # If we made it here, then this attempt failed.
        # We need to pick a new starting seed for the tortoise and hare
        # in addition to a new step value for the random function.
        # To keep this example implementation deterministic, the
        # new values will be generated based on currently available
        # values instead of using something like ``random.randint``.

        # We can use the hare's position as the new seed.
        # This is actually what Richard Brent's "optimized" variant does.
        seed = hare

        # The new step value for the random function can just be incremented.
        # At first the results will be similar to what the old function would
        # have produced, but the value will quickly diverge after a bit.
        step += 1

    # We haven't found a divisor within the requested number of attempts.
    # We were unlucky or ``num`` itself is actually prime.
    return None


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
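
# Quick check (added; not part of the original script): 8051 = 83 * 97, the
# textbook Pollard's rho example, so the default seed and step should recover
# one of the two factors.
assert pollard_rho(8051) in (83, 97)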
| code_codestyle: 132 |
"""simple docstring"""
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0

    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1

    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]

    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today

    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)

    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)

    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)

    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    return _calculate(days, absent=0, late=0)


if __name__ == "__main__":
    print(solution())
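
# Worked check (added; not part of the original file): Project Euler 191 states
# that over a 4-day period there are exactly 43 prize strings.
assert solution(4) == 43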
| style_context_codestyle: 132 | label: 1 |
"""simple docstring"""
import argparse
import json
import os

import torch
from transformers.file_utils import has_file

from diffusers import UNet2DConditionModel, UNet2DModel


do_only_config = False
do_only_weights = True
do_only_renaming = False


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--repo_path",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()

    config_parameters_to_change = {
        "image_size": "sample_size",
        "num_res_blocks": "layers_per_block",
        "block_channels": "block_out_channels",
        "down_blocks": "down_block_types",
        "up_blocks": "up_block_types",
        "downscale_freq_shift": "freq_shift",
        "resnet_num_groups": "norm_num_groups",
        "resnet_act_fn": "act_fn",
        "resnet_eps": "norm_eps",
        "num_head_channels": "attention_head_dim",
    }

    key_parameters_to_change = {
        "time_steps": "time_proj",
        "mid": "mid_block",
        "downsample_blocks": "down_blocks",
        "upsample_blocks": "up_blocks",
    }

    subfolder = "" if has_file(args.repo_path, "config.json") else "unet"

    with open(os.path.join(args.repo_path, subfolder, "config.json"), "r", encoding="utf-8") as reader:
        text = reader.read()
        config = json.loads(text)

    if do_only_config:
        for key in config_parameters_to_change.keys():
            config.pop(key, None)

    if has_file(args.repo_path, "config.json"):
        model = UNet2DModel(**config)
    else:
        class_name = UNet2DConditionModel if "ldm-text2im-large-256" in args.repo_path else UNet2DModel
        model = class_name(**config)

    if do_only_config:
        model.save_config(os.path.join(args.repo_path, subfolder))

    config = dict(model.config)

    if do_only_renaming:
        for key, value in config_parameters_to_change.items():
            if key in config:
                config[value] = config[key]
                del config[key]

        config["down_block_types"] = [k.replace("UNetRes", "") for k in config["down_block_types"]]
        config["up_block_types"] = [k.replace("UNetRes", "") for k in config["up_block_types"]]

    if do_only_weights:
        state_dict = torch.load(os.path.join(args.repo_path, subfolder, "diffusion_pytorch_model.bin"))

        new_state_dict = {}
        for param_key, param_value in state_dict.items():
            if param_key.endswith(".op.bias") or param_key.endswith(".op.weight"):
                continue
            has_changed = False
            for key, new_key in key_parameters_to_change.items():
                if not has_changed and param_key.split(".")[0] == key:
                    new_state_dict[".".join([new_key] + param_key.split(".")[1:])] = param_value
                    has_changed = True
            if not has_changed:
                new_state_dict[param_key] = param_value

        model.load_state_dict(new_state_dict)
        model.save_pretrained(os.path.join(args.repo_path, subfolder))
| code_codestyle: 360 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
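
# Illustrative usage sketch (added; not part of the original module). It assumes
# an installed transformers and network access to the allenai/led-base-16384
# checkpoint; the sentence and lengths are placeholders.
from transformers import LEDTokenizer

led_tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = led_tokenizer("Long documents need global attention on the first token.")
enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
padded = led_tokenizer.pad(enc, padding="max_length", max_length=32)
print(len(padded["global_attention_mask"]))  # 32; right-padded with -1 by _pad above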
| style_context_codestyle: 40 | label: 0 |
'''simple docstring'''
import warnings

from ...utils import logging
from .image_processing_flava import FlavaImageProcessor


logger = logging.get_logger(__name__)


class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use FlavaImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| code_codestyle: 47 |
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request

from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version

from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


COMMUNITY_PIPELINES_URL = (
    "https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py"
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_diffusers_versions():
    url = "https://pypi.org/pypi/diffusers/json"
    releases = json.loads(request.urlopen(url).read())["releases"].keys()
    return sorted(releases, key=lambda x: version.Version(x))


def init_hf_modules():
    # This function has already been executed if HF_MODULES_CACHE already is in the Python path.
    if HF_MODULES_CACHE in sys.path:
        return

    sys.path.append(HF_MODULES_CACHE)
    os.makedirs(HF_MODULES_CACHE, exist_ok=True)
    init_path = Path(HF_MODULES_CACHE) / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def create_dynamic_module(name):
    init_hf_modules()
    dynamic_module_path = Path(HF_MODULES_CACHE) / name
    # If the parent module does not exist yet, recursively create it.
    if not dynamic_module_path.parent.exists():
        create_dynamic_module(dynamic_module_path.parent)
    os.makedirs(dynamic_module_path, exist_ok=True)
    init_path = dynamic_module_path / "__init__.py"
    if not init_path.exists():
        init_path.touch()


def get_relative_imports(module_file):
    with open(module_file, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import .xxx`
    relative_imports = re.findall(r"^\s*import\s+\.(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from .xxx import yyy`
    relative_imports += re.findall(r"^\s*from\s+\.(\S+)\s+import", content, flags=re.MULTILINE)
    # Unique-ify
    return list(set(relative_imports))


def get_relative_import_files(module_file):
    no_change = False
    files_to_check = [module_file]
    all_relative_imports = []

    # Let's recurse through all relative imports
    while not no_change:
        new_imports = []
        for f in files_to_check:
            new_imports.extend(get_relative_imports(f))

        module_path = Path(module_file).parent
        new_import_files = [str(module_path / m) for m in new_imports]
        new_import_files = [f for f in new_import_files if f not in all_relative_imports]
        files_to_check = [f"{f}.py" for f in new_import_files]

        no_change = len(new_import_files) == 0
        all_relative_imports.extend(files_to_check)

    return all_relative_imports


def check_imports(filename):
    with open(filename, "r", encoding="utf-8") as f:
        content = f.read()

    # Imports of the form `import xxx`
    imports = re.findall(r"^\s*import\s+(\S+)\s*$", content, flags=re.MULTILINE)
    # Imports of the form `from xxx import yyy`
    imports += re.findall(r"^\s*from\s+(\S+)\s+import", content, flags=re.MULTILINE)
    # Only keep the top-level module
    imports = [imp.split(".")[0] for imp in imports if not imp.startswith(".")]

    # Unique-ify and test we got them all
    imports = list(set(imports))
    missing_packages = []
    for imp in imports:
        try:
            importlib.import_module(imp)
        except ImportError:
            missing_packages.append(imp)

    if len(missing_packages) > 0:
        raise ImportError(
            "This modeling file requires the following packages that were not found in your environment: "
            f"{', '.join(missing_packages)}. Run `pip install {' '.join(missing_packages)}`"
        )

    return get_relative_imports(filename)


def get_class_in_module(class_name, module_path):
    module_path = module_path.replace(os.path.sep, ".")
    module = importlib.import_module(module_path)

    if class_name is None:
        return find_pipeline_class(module)
    return getattr(module, class_name)


def find_pipeline_class(loaded_module):
    from ..pipelines import DiffusionPipeline

    cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))

    pipeline_class = None
    for cls_name, cls in cls_members.items():
        if (
            cls_name != DiffusionPipeline.__name__
            and issubclass(cls, DiffusionPipeline)
            and cls.__module__.split(".")[0] != "diffusers"
        ):
            if pipeline_class is not None:
                raise ValueError(
                    f"Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:"
                    f" {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in"
                    f" {loaded_module}."
                )
            pipeline_class = cls

    return pipeline_class


def get_cached_module_file(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
):
    # Download and cache module_file from the repo `pretrained_model_name_or_path`, or grab it if it's a local file.
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)

    module_file_or_url = os.path.join(pretrained_model_name_or_path, module_file)

    if os.path.isfile(module_file_or_url):
        resolved_module_file = module_file_or_url
        submodule = "local"
    elif pretrained_model_name_or_path.count("/") == 0:
        available_versions = get_diffusers_versions()
        # cut ".dev0"
        latest_version = "v" + ".".join(__version__.split(".")[:3])

        # retrieve github version that matches
        if revision is None:
            revision = latest_version if latest_version[1:] in available_versions else "main"
            logger.info(f"Defaulting to latest_version: {revision}.")
        elif revision in available_versions:
            revision = f"v{revision}"
        elif revision == "main":
            revision = revision
        else:
            raise ValueError(
                f"`custom_revision`: {revision} does not exist. Please make sure to choose one of"
                f" {', '.join(available_versions + ['main'])}."
            )

        # community pipeline on GitHub
        github_url = COMMUNITY_PIPELINES_URL.format(revision=revision, pipeline=pretrained_model_name_or_path)
        try:
            resolved_module_file = cached_download(
                github_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=False,
            )
            submodule = "git"
            module_file = pretrained_model_name_or_path + ".py"
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise
    else:
        try:
            # Load from URL or cache if already cached
            resolved_module_file = hf_hub_download(
                pretrained_model_name_or_path,
                module_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            submodule = os.path.join("local", "--".join(pretrained_model_name_or_path.split("/")))
        except EnvironmentError:
            logger.error(f"Could not locate the {module_file} inside {pretrained_model_name_or_path}.")
            raise

    # Check we have all the requirements in our environment
    modules_needed = check_imports(resolved_module_file)

    # Now we move the module inside our cached dynamic modules.
    full_submodule = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
    create_dynamic_module(full_submodule)
    submodule_path = Path(HF_MODULES_CACHE) / full_submodule
    if submodule == "local" or submodule == "git":
        # We always copy local files (we could hash the file to see if there was a change, and give them the name of
        # that hash, to only copy when there is a modification but it seems overkill for now).
        # The only reason we do the copy is to avoid putting too many folders in sys.path.
        shutil.copy(resolved_module_file, submodule_path / module_file)
        for module_needed in modules_needed:
            module_needed = f"{module_needed}.py"
            shutil.copy(os.path.join(pretrained_model_name_or_path, module_needed), submodule_path / module_needed)
    else:
        # Get the commit hash
        # TODO: we will get this info in the etag soon, so retrieve it from there and not here.
        if isinstance(use_auth_token, str):
            token = use_auth_token
        elif use_auth_token is True:
            token = HfFolder.get_token()
        else:
            token = None

        commit_hash = model_info(pretrained_model_name_or_path, revision=revision, token=token).sha

        # The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
        # benefit of versioning.
        submodule_path = submodule_path / commit_hash
        full_submodule = full_submodule + os.path.sep + commit_hash
        create_dynamic_module(full_submodule)

        if not (submodule_path / module_file).exists():
            shutil.copy(resolved_module_file, submodule_path / module_file)
        # Make sure we also have every file with relative
        for module_needed in modules_needed:
            if not (submodule_path / module_needed).exists():
                get_cached_module_file(
                    pretrained_model_name_or_path,
                    f"{module_needed}.py",
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    local_files_only=local_files_only,
                )
    return os.path.join(full_submodule, module_file)


def get_class_from_dynamic_module(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    module_file: str,
    class_name: Optional[str] = None,
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    # And lastly we get the class inside our newly created module
    final_module = get_cached_module_file(
        pretrained_model_name_or_path,
        module_file,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    return get_class_in_module(class_name, final_module.replace(".py", ""))
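
# Illustrative usage sketch (added; not part of the original module). Fetching a
# community pipeline class by name requires network access plus the pipeline's
# own dependencies; treat the exact pipeline name below as an assumption of
# this example.
from diffusers.utils.dynamic_modules_utils import get_class_from_dynamic_module

pipeline_cls = get_class_from_dynamic_module(
    "clip_guided_stable_diffusion", module_file="clip_guided_stable_diffusion.py"
)
print(pipeline_cls.__name__)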
| style_context_codestyle: 266 | label: 0 |
import argparse
import os
from pathlib import Path

import fairseq
import torch
from packaging import version
from torch import nn

from transformers import (
    BartConfig,
    BartForConditionalGeneration,
    BartForSequenceClassification,
    BartModel,
    BartTokenizer,
)
from transformers.utils import logging


FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
    raise Exception("requires fairseq >= 0.9.0")


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = " Hello world! cécé herlolip"

mnli_rename_keys = [
    ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
    ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
    ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
    ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def load_xsum_checkpoint(checkpoint_path):
    """Checkpoint path should end in model.pt"""
    sd = torch.load(checkpoint_path, map_location="cpu")
    hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval()
    hub_interface.model.load_state_dict(sd["model"])
    return hub_interface


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


@torch.no_grad()
def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None):
    """
    Copy/paste/tweak model's weights to our BART structure.
    """
    if not os.path.exists(checkpoint_path):
        bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval()
    else:
        bart = load_xsum_checkpoint(checkpoint_path)

    bart.model.upgrade_state_dict(bart.model.state_dict())
    if hf_checkpoint_name is None:
        hf_checkpoint_name = checkpoint_path.replace(".", "-")
    config = BartConfig.from_pretrained(hf_checkpoint_name)
    tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0)
    tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0)
    if not torch.eq(tokens, tokens2).all():
        raise ValueError(
            f"converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokens2}"
        )

    if checkpoint_path == "bart.large.mnli":
        state_dict = bart.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"]
        for src, dest in mnli_rename_keys:
            rename_key(state_dict, src, dest)
        model = BartForSequenceClassification(config).eval()
        model.load_state_dict(state_dict)
        fairseq_output = bart.predict("mnli", tokens, return_logits=True)
        new_model_outputs = model(tokens)[0]  # logits
    else:  # no classification heads to worry about
        state_dict = bart.model.state_dict()
        remove_ignore_keys_(state_dict)
        state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
        fairseq_output = bart.extract_features(tokens)
        if hf_checkpoint_name == "facebook/bart-large":
            model = BartModel(config).eval()
            model.load_state_dict(state_dict)
            new_model_outputs = model(tokens).model[0]
        else:
            model = BartForConditionalGeneration(config).eval()  # an existing summarization ckpt
            model.model.load_state_dict(state_dict)
            if hasattr(model, "lm_head"):
                model.lm_head = make_linear_from_emb(model.model.shared)
            new_model_outputs = model.model(tokens)[0]

    # Check results
    if fairseq_output.shape != new_model_outputs.shape:
        raise ValueError(
            f"`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}"
        )
    if (fairseq_output != new_model_outputs).any().item():
        raise ValueError("Some values in `fairseq_output` are different from `new_model_outputs`")
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
    )
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument(
        "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
    )
    args = parser.parse_args()
    convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| code_codestyle: 358 |
from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from PIL import Image

from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel


class AudioDiffusionPipeline(DiffusionPipeline):
    """simple docstring"""

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1_000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = self.vqvae.config.scaling_factor * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_, mode="RGB").convert("L") for _ in images)
        )

        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        # Only works with a DDIM scheduler, since this reverse process is deterministic.
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t**0.5 + beta_prod_t**0.5 * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical linear interpolation between two tensors."""
        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
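
# Illustrative usage sketch (added; not part of the original pipeline file).
# "teticio/audio-diffusion-256" is a hub checkpoint trained for this pipeline;
# running it requires network access and enough compute for the full DDPM loop.
import torch
from diffusers import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
output = pipe(generator=torch.Generator().manual_seed(42))
spectrogram_image = output.images[0]  # PIL image of the generated mel spectrogram
raw_audio = output.audios[0]          # numpy waveform at pipe.mel.get_sample_rate()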
| style_context_codestyle: 277 | label: 0 |
"""simple docstring"""
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    # for printing the graph vertices
    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    # for adding the edge between two vertices
    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        # visited array for storing already visited nodes
        visited = [False] * len(self.vertex)

        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        # mark start vertex as visited
        visited[start_vertex] = True

        print(start_vertex, end=" ")

        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
    g.add_edge(0, 1)
    g.add_edge(0, 2)
    g.add_edge(1, 2)
    g.add_edge(2, 0)
    g.add_edge(2, 3)
    g.add_edge(3, 3)

    g.print_graph()
    print("DFS:")
    g.dfs()

    # OUTPUT:
    # 0 -> 1 -> 2
    # 1 -> 2
    # 2 -> 0 -> 3
    # 3 -> 3
    # DFS:
    # 0 1 2 3
| code_codestyle: 173 |
def twos_complement(number: int) -> str:
    """simple docstring"""
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
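
# Worked examples (added; not part of the original file):
assert twos_complement(0) == "0b0"
assert twos_complement(-1) == "0b11"
assert twos_complement(-5) == "0b1011"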
| style_context_codestyle: 227 | label: 0 |
'''simple docstring'''
import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
| code_codestyle: 367 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__snake_case = get_logger()
__snake_case = None
class lowercase ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
"""simple docstring"""
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ):
'''simple docstring'''
super().__init__(features=UpperCamelCase_ )
import jax
from jaxlib.xla_client import Device
if isinstance(UpperCamelCase_ , UpperCamelCase_ ):
raise ValueError(
F'''Expected {device} to be a `str` not {type(UpperCamelCase_ )}, as `jaxlib.xla_extension.Device` '''
'''is not serializable neither with `pickle` nor with `dill`. Instead you can surround '''
'''the device with `str()` to get its string identifier that will be internally mapped '''
'''to the actual `jaxlib.xla_extension.Device`.''' )
UpperCamelCase__ :Tuple = device if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
UpperCamelCase__ :Optional[Any] = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
F'''Device with string identifier {self.device} not listed among the available '''
F'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
F'''device: {str(jax.devices()[0] )}.''' )
UpperCamelCase__ :Optional[int] = str(jax.devices()[0] )
UpperCamelCase__ :Tuple = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str():
        '''simple docstring'''
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None)) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {'''dtype''': jnp.int64}
            else:
                default_dtype = {'''dtype''': jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {'''dtype''': jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
    def _recursive_tensorize( self , data_struct ):
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , '''__array__''' ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table ):
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table ):
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table ):
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch
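# Minimal usage sketch (illustrative only, assuming the `datasets` library wiring
# around this class): this formatter is what backs the "jax" output format, e.g.
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   ds[0]  # rows come back as a dict of jnp arrays on the requested device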
| 219
| 0
|
import torch
from diffusers import KDPM2DiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __A( SchedulerCommonTest ):
    """simple docstring"""
    scheduler_classes = (KDPM2DiscreteScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self , **kwargs ):
        config = {
            """num_train_timesteps""": 11_00,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
        }
        config.update(**kwargs )
        return config
    def test_timesteps(self ):
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_betas(self ):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end )
    def test_schedules(self ):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule )
    def test_prediction_type(self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def test_full_loop_with_v_prediction(self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""" )
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 4.6934E-07 ) < 1E-2
            assert abs(result_mean.item() - 6.1112E-10 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 4.693428650170972E-07 ) < 1E-2
            assert abs(result_mean.item() - 0.0002 ) < 1E-3
    def test_full_loop_no_noise(self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device )
        for i, t in enumerate(scheduler.timesteps ):
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if torch_device in ["cpu", "mps"]:
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
    def test_full_loop_device(self ):
        if torch_device == "mps":
            return
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(self.num_inference_steps , device=torch_device )
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device ) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample , t )
            model_output = model(sample , t )
            output = scheduler.step(model_output , t , sample )
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        if str(torch_device ).startswith("""cpu""" ):
            # The following sum varies between 148 and 156 on mps. Why?
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
        else:
            # CUDA
            assert abs(result_sum.item() - 20.4125 ) < 1E-2
            assert abs(result_mean.item() - 0.0266 ) < 1E-3
| 244
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any = logging.get_logger(__name__)
A_ : Optional[Any] = {
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class A_ ( PretrainedConfig ):
    '''simple docstring'''
    model_type = "pegasus"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(self , vocab_size=50_265 , max_position_embeddings=1_024 , encoder_layers=12 , encoder_ffn_dim=4_096 , encoder_attention_heads=16 , decoder_layers=12 , decoder_ffn_dim=4_096 , decoder_attention_heads=16 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=1_024 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=0 , scale_embedding=False , pad_token_id=0 , eos_token_id=1 , forced_eos_token_id=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            pad_token_id=pad_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
    @property
    def num_attention_heads(self ) -> int:
        return self.encoder_attention_heads
    @property
    def hidden_size(self ) -> int:
        return self.d_model
| 333
| 0
|
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ) -> None:
    """simple docstring"""
    ignore_keys = [
        '''encoder.version''',
        '''decoder.version''',
        '''model.encoder.version''',
        '''model.decoder.version''',
        '''_float_tensor''',
        '''decoder.output_projection.weight''',
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ) -> nn.Linear:
    """simple docstring"""
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ) -> MBartForConditionalGeneration:
    """simple docstring"""
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict['''encoder.embed_tokens.weight'''].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = '''relu'''
    state_dict['''shared.weight'''] = state_dict['''decoder.embed_tokens.weight''']
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
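# Example invocation (illustrative; the script filename and paths below are
# placeholders, not part of the original file):
#   python convert_mbart.py /path/to/model.pt ./mbart_dump --hf_config facebook/mbart-large-cc25 --finetuned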
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'''
)
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--hf_config''',
default='''facebook/mbart-large-cc25''',
type=str,
help='''Which huggingface architecture to use: mbart-large''',
)
    parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mBART-50 checkpoint''')
parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''')
A__ = parser.parse_args()
    A__ = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
| 44
|
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
A__ = logging.get_logger(__name__)
A__ = {
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/config.json''',
# See all BART models at https://huggingface.co/models?filter=bart
}
class a ( PretrainedConfig ):
    model_type = """bart"""
    keys_to_ignore_at_inference = ["""past_key_values"""]
    attribute_map = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
    def __init__( self , vocab_size=5_0_2_6_5 , max_position_embeddings=1_0_2_4 , encoder_layers=1_2 , encoder_ffn_dim=4_0_9_6 , encoder_attention_heads=1_6 , decoder_layers=1_2 , decoder_ffn_dim=4_0_9_6 , decoder_attention_heads=1_6 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , activation_function="gelu" , d_model=1_0_2_4 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , classifier_dropout=0.0 , scale_embedding=False , use_cache=True , num_labels=3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , is_encoder_decoder=True , decoder_start_token_id=2 , forced_eos_token_id=2 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        super().__init__(
            num_labels=num_labels , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , forced_eos_token_id=forced_eos_token_id , **kwargs , )
        # ensure backward compatibility for BART CNN models
        if self.forced_bos_token_id is None and kwargs.get('''force_bos_token_to_be_generated''' , False ):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                F"""Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. """
                '''The config can simply be saved and uploaded again to be fixed.''' )
class BartOnnxConfig ( OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
            else:
                common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
                common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs , direction='''inputs''' )
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                ] )
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_inputs[F"""past_key_values.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_inputs[F"""past_key_values.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        else:
            common_inputs = OrderedDict(
                [
                    ('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
                    ('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
                    ('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
                ] )
        return common_inputs
    @property
    def outputs( self ):
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast , self ).outputs
            if self.use_past:
                num_encoder_layers , _ = self.num_layers
                for i in range(num_encoder_layers ):
                    common_outputs[F"""present.{i}.key"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
                    common_outputs[F"""present.{i}.value"""] = {0: '''batch''', 2: '''past_sequence + sequence'''}
        return common_outputs
    def _generate_dummy_inputs_for_default_and_seq2seq_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , decoder_seq_length , is_pair , framework )
        decoder_inputs = {F"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs , **decoder_inputs )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , encoder_seq_length = common_inputs['''input_ids'''].shape
            decoder_seq_length = common_inputs['''decoder_input_ids'''].shape[1]
            num_encoder_attention_heads , num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )
            common_inputs['''decoder_attention_mask'''] = torch.cat(
                [common_inputs['''decoder_attention_mask'''], torch.ones(batch , decoder_past_length )] , dim=1 )
            common_inputs['''past_key_values'''] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers , num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers , num_decoder_layers )
            max_num_layers = max(num_encoder_layers , num_decoder_layers ) - min_num_layers
            remaining_side_name = '''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
            for _ in range(min_num_layers ):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape ),
                        torch.zeros(decoder_shape ),
                        torch.zeros(encoder_shape ),
                        torch.zeros(encoder_shape ),
                    ) )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
            for _ in range(min_num_layers , max_num_layers ):
                common_inputs["past_key_values"].append((torch.zeros(shape ), torch.zeros(shape )) )
        return common_inputs
    def _generate_dummy_inputs_for_causal_lm( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer , batch_size , seq_length , is_pair , framework )
        if self.use_past:
            if not is_torch_available():
                raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
            else:
                import torch
            batch , seqlen = common_inputs['''input_ids'''].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers , _ = self.num_layers
            num_encoder_attention_heads , _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            mask_dtype = common_inputs['''attention_mask'''].dtype
            common_inputs['''attention_mask'''] = torch.cat(
                [common_inputs['''attention_mask'''], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
            common_inputs['''past_key_values'''] = [
                (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(num_encoder_layers )
            ]
        return common_inputs
    def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        # Copied from OnnxConfig.generate_dummy_inputs
        # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )
        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input , return_tensors=framework ) )
        return common_inputs
    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ):
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        return common_inputs
    def _flatten_past_key_values_( self , flattened_output , name , idx , t ):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output , name , idx , t )
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast , self )._flatten_past_key_values_(
                flattened_output , name , idx , t )
| 44
| 1
|
"""simple docstring"""
from typing import Any
class Node:
    def __init__( self , data ):
        self.data = data
        self.next = None
    def __repr__( self ):
        return F'''Node({self.data})'''
class LinkedList:
    def __init__( self ):
        self.head = None
    def __iter__( self ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self ):
        return sum(1 for _ in self )
    def __repr__( self ):
        return "->".join([str(item ) for item in self] )
    def __getitem__( self , index ):
        if not 0 <= index < len(self ):
            raise ValueError('''list index out of range.''' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        if not 0 <= index < len(self ):
            raise ValueError('''list index out of range.''' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self , data ):
        self.insert_nth(len(self ) , data )
    def insert_head( self , data ):
        self.insert_nth(0 , data )
    def insert_nth( self , index , data ):
        if not 0 <= index <= len(self ):
            raise IndexError('''list index out of range''' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self ):  # print every node data
        print(self )
    def delete_head( self ):
        return self.delete_nth(0 )
    def delete_tail( self ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self , index = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('''List index out of range.''' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self ):
        return self.head is None
    def reverse( self ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112 ),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('''Hello again, world!''' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main():
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
    linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
    print('''\nPrint list:''' )
    linked_list.print_list()
    linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
    linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
    print('''\nPrint list:''' )
    linked_list.print_list()
    print('''\nDelete head''' )
    linked_list.delete_head()
    print('''Delete tail''' )
    linked_list.delete_tail()
    print('''\nPrint list:''' )
    linked_list.print_list()
    print('''\nReverse linked list''' )
    linked_list.reverse()
    print('''\nPrint list:''' )
    linked_list.print_list()
    print('''\nString representation of linked list:''' )
    print(linked_list )
    print('''\nReading/changing Node data using indexing:''' )
    print(f'''Element at Position 1: {linked_list[1]}''' )
    linked_list[1] = input('''Enter New Value: ''' ).strip()
    print('''New list:''' )
    print(linked_list )
    print(f'''length of linked_list is : {len(linked_list )}''' )
if __name__ == "__main__":
    main()
| 60
|
"""simple docstring"""
def lowercase ( num )-> str:
    '''simple docstring'''
    if isinstance(num , float ):
        raise TypeError("'float' object cannot be interpreted as an integer" )
    if isinstance(num , str ):
        raise TypeError("'str' object cannot be interpreted as an integer" )
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary : list[int] = []
    while num > 0:
        binary.insert(0 , num % 2 )
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e ) for e in binary )
    return "0b" + "".join(str(e ) for e in binary )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 40
| 0
|
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
def rename_key( key: str ) -> str:
    regex = r'''\w+[.]\d+'''
    pats = re.findall(regex , key )
    for pat in pats:
        key = key.replace(pat , '''_'''.join(pat.split('''.''' ) ) )
    return key
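# Example (illustrative): rename_key("down_blocks.0.resnets.1.conv1.weight")
# returns "down_blocks_0.resnets_1.conv1.weight" -- every "<name>.<digit>"
# module index is flattened into a Flax-style "<name>_<digit>" key segment.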
def rename_key_and_reshape_tensor( pt_tuple_key , pt_tensor , random_flax_state_dict ):
    # conv norm or layer norm
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
    if (
        any('''norm''' in str_ for str_ in pt_tuple_key )
        and (pt_tuple_key[-1] == "bias")
        and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
        and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
    ):
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''scale''',)
        return renamed_pt_tuple_key, pt_tensor
    # embedding
    if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
        renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''embedding''',)
        return renamed_pt_tuple_key, pt_tensor
    # conv layer: PyTorch stores (out, in, kh, kw) while Flax expects (kh, kw, in, out)
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
        pt_tensor = pt_tensor.transpose(2 , 3 , 1 , 0 )
        return renamed_pt_tuple_key, pt_tensor
    # linear layer: Flax kernels are the transpose of PyTorch weights
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''kernel''',)
    if pt_tuple_key[-1] == "weight":
        pt_tensor = pt_tensor.T
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm weight
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''weight''',)
    if pt_tuple_key[-1] == "gamma":
        return renamed_pt_tuple_key, pt_tensor
    # old PyTorch layer norm bias
    renamed_pt_tuple_key = pt_tuple_key[:-1] + ('''bias''',)
    if pt_tuple_key[-1] == "beta":
        return renamed_pt_tuple_key, pt_tensor
    return pt_tuple_key, pt_tensor
def convert_pytorch_state_dict_to_flax( pt_state_dict , flax_model , init_key=42 ):
    # Step 1: Convert pytorch tensor to numpy
    pt_state_dict = {k: v.numpy() for k, v in pt_state_dict.items()}
    # Step 2: Since the model is stateless, get random Flax params
    random_flax_params = flax_model.init_weights(PRNGKey(init_key ) )
    random_flax_state_dict = flatten_dict(random_flax_params )
    flax_state_dict = {}
    # Need to change some parameters name to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        renamed_pt_key = rename_key(pt_key )
        pt_tuple_key = tuple(renamed_pt_key.split('''.''' ) )
        # Correctly rename weight parameters
        flax_key , flax_tensor = rename_key_and_reshape_tensor(pt_tuple_key , pt_tensor , random_flax_state_dict )
        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    f'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
        # also add unexpected weight so that warning is thrown
        flax_state_dict[flax_key] = jnp.asarray(flax_tensor )
    return unflatten_dict(flax_state_dict )
| 99
|
from __future__ import annotations
import queue
class TreeNode :
    def __init__( self , data ):
        self.data = data
        self.left = None
        self.right = None
def build_tree() -> TreeNode:
    print('''\n********Press N to stop entering at any point of time********\n''' )
    check = input('''Enter the value of the root node: ''' ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f'''Enter the left node of {node_found.data}: '''
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f'''Enter the right node of {node_found.data}: '''
        check = input(msg ).strip().lower() or '''n'''
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end=''',''' )
    pre_order(node.left )
    pre_order(node.right )
def in_order( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end=''',''' )
    in_order(node.right )
def post_order( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end=''',''' )
def level_order( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end=''',''' )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end=''',''' )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end=''',''' )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end=''',''' )
        n = n.right
def post_order_iter( node: TreeNode ) -> None:
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1 , stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end=''',''' )
def a__ ( UpperCAmelCase : str = "" , UpperCAmelCase : int=50 , UpperCAmelCase : Union[str, Any]="*" ) -> str:
if not s:
return "\n" + width * char
UpperCAmelCase , UpperCAmelCase : int = divmod(width - len(UpperCAmelCase ) - 2 , 2 )
return f'''{left * char} {s} {(left + extra) * char}'''
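# Illustrative outputs (added for clarity; they follow from the implementation above):
# >>> prompt("Hi", 10)
# '*** Hi ***'
# >>> prompt(width=8)
# '\n********'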
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt("Binary Tree Traversals"))
    node: TreeNode = build_tree()
print(prompt("Pre Order Traversal"))
pre_order(node)
print(prompt() + "\n")
print(prompt("In Order Traversal"))
in_order(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal"))
post_order(node)
print(prompt() + "\n")
print(prompt("Level Order Traversal"))
level_order(node)
print(prompt() + "\n")
print(prompt("Actual Level Order Traversal"))
level_order_actual(node)
print("*" * 5_0 + "\n")
print(prompt("Pre Order Traversal - Iteration Version"))
pre_order_iter(node)
print(prompt() + "\n")
print(prompt("In Order Traversal - Iteration Version"))
in_order_iter(node)
print(prompt() + "\n")
print(prompt("Post Order Traversal - Iteration Version"))
post_order_iter(node)
print(prompt())
| 99
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_x_clip': [
'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XCLIPConfig',
'XCLIPTextConfig',
'XCLIPVisionConfig',
],
'processing_x_clip': ['XCLIPProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_x_clip'] = [
'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'XCLIPModel',
'XCLIPPreTrainedModel',
'XCLIPTextModel',
'XCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 1
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :Optional[int] = logging.get_logger(__name__)
a_ :Dict = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class snake_case__ ( PretrainedConfig ):
    """simple docstring"""
    model_type = """openai-gpt"""
    attribute_map = {
        """max_position_embeddings""": """n_positions""",
        """hidden_size""": """n_embd""",
        """num_attention_heads""": """n_head""",
        """num_hidden_layers""": """n_layer""",
    }
    def __init__( self , vocab_size=4_0_4_7_8 , n_positions=5_1_2 , n_embd=7_6_8 , n_layer=1_2 , n_head=1_2 , afn="gelu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.0_2 , summary_type="cls_index" , summary_use_proj=True , summary_activation=None , summary_proj_to_labels=True , summary_first_dropout=0.1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs )
| 277
| 0
|
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=10_24 , max_target_length=10_24 , consider_target=False , **kwargs ) -> None:
    """simple docstring"""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='train' , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=5_12 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='val' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
fire.Fire(save_len_file)
| 355
|
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ) -> int:
    """simple docstring"""
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ) -> ModelCheckpoint:
    """simple docstring"""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F"""val_{metric}""" , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ) -> EarlyStopping:
    """simple docstring"""
    return EarlyStopping(
        monitor=F"""val_{metric}""" , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
    def on_batch_end( self , trainer , pl_module ) -> None:
        lrs = {F"""lr_group_{i}""": param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ) -> None:
        logger.info(F"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F"""{type_path}_results/{trainer.global_step:05d}.txt"""
            generations_file = od / F"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F"""{key}: {val:.6f}\n"""
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
    @rank_zero_only
    def on_train_start( self , trainer , pl_module ) -> None:
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
    @rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
    @rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 120
| 0
|
'''simple docstring'''
from numpy import exp, pi, sqrt
def __UpperCAmelCase ( x: float, mu: float = 0.0, sigma: float = 1.0 ):
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
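# Quick sanity check (illustrative): a standard normal density peaks at
# 1 / sqrt(2 * pi) ~= 0.3989 when x == mu:
# >>> round(__UpperCAmelCase(0), 4)
# 0.3989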
if __name__ == "__main__":
import doctest
doctest.testmod()
| 145
|
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_multiple_size=4 , hidden_act="gelu" , hidden_dropout=0.0 , attention_dropout=0.1 , weight_tying=True , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
        return config, input_ids, input_mask, token_labels
    def get_config( self ):
        """simple docstring"""
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_multiple_size=self.intermediate_multiple_size , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , weight_tying=self.weight_tying , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def prepare_config_and_inputs_for_decoder( self ):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model( self , config , input_ids , input_mask ):
        """simple docstring"""
        model = GPTNeoXJapaneseModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , input_mask ):
        """simple docstring"""
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_causal_lm( self , config , input_ids , input_mask , token_labels ):
        """simple docstring"""
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=input_ids )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_decoder_model_past_large_inputs( self , config , input_ids , input_mask ):
        """simple docstring"""
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config )
        model.to(torch_device )
        model.eval()
        # first forward pass
        outputs = model(input_ids , attention_mask=input_mask , use_cache=True )
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_mask = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([input_mask, next_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask , output_hidden_states=True )
        output_from_no_past = output_from_no_past["""hidden_states"""][0]
        output_from_past = model(
            next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values , output_hidden_states=True , )["""hidden_states"""][0]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice , output_from_past_slice , atol=1E-3 ) )
    def prepare_config_and_inputs_for_common( self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __snake_case ( lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
lowerCAmelCase_ = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
lowerCAmelCase_ = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ = (
{"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = GPTNeoXJapaneseModelTester(self )
SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=_lowercase , hidden_size=37 )
def __a ( self : Tuple ):
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_lowercase , _lowercase , _lowercase )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_decoder()
SCREAMING_SNAKE_CASE__ = None
self.model_tester.create_and_check_model_as_decoder(_lowercase , _lowercase , _lowercase )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(_lowercase , _lowercase , _lowercase )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*_lowercase )
@slow
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """abeja/gpt-neox-japanese-2.7b"""
SCREAMING_SNAKE_CASE__ = ["""データサイエンティストとは、""", """100年後に必要とされる会社は、""", """フルリモートの環境で働くために必要なことは、""", """国境の長いトンネルを抜けると""", """美味しい日本食といえば、"""]
SCREAMING_SNAKE_CASE__ = [
"""データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。""",
"""100年後に必要とされる会社は、「人」が中心の会社です。""",
"""フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。""",
"""国境の長いトンネルを抜けると、そこは雪国だった。""",
"""美味しい日本食といえば、やっぱりお寿司ですよね。""",
]
SCREAMING_SNAKE_CASE__ = GPTNeoXJapaneseTokenizer.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = GPTNeoXJapaneseForCausalLM.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ = []
for prompt in prompts:
SCREAMING_SNAKE_CASE__ = tokenizer(_lowercase , return_tensors="""pt""" ).input_ids
SCREAMING_SNAKE_CASE__ = model.generate(_lowercase , max_length=50 )
SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
predicted_outputs += generated_string
self.assertListEqual(_lowercase , _lowercase )
| 219
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCamelCase ( A ):
'''simple docstring'''
lowerCAmelCase__ = """megatron-bert"""
def __init__( self : str , _lowerCAmelCase : Dict=2_9_0_5_6 , _lowerCAmelCase : str=1_0_2_4 , _lowerCAmelCase : List[str]=2_4 , _lowerCAmelCase : Optional[int]=1_6 , _lowerCAmelCase : Tuple=4_0_9_6 , _lowerCAmelCase : Dict="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[str]=0.1 , _lowerCAmelCase : Any=5_1_2 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : str=1e-12 , _lowerCAmelCase : List[Any]=0 , _lowerCAmelCase : int="absolute" , _lowerCAmelCase : Dict=True , **_lowerCAmelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=_lowerCAmelCase , **_lowerCAmelCase)
__lowercase =vocab_size
__lowercase =hidden_size
__lowercase =num_hidden_layers
__lowercase =num_attention_heads
__lowercase =hidden_act
__lowercase =intermediate_size
__lowercase =hidden_dropout_prob
__lowercase =attention_probs_dropout_prob
__lowercase =max_position_embeddings
__lowercase =type_vocab_size
__lowercase =initializer_range
__lowercase =layer_norm_eps
__lowercase =position_embedding_type
__lowercase =use_cache
| 363
|
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def _A ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , ):
"""simple docstring"""
if config_name_or_path is None:
__lowercase ='facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
__lowercase =generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
__lowercase =question_encoder_name_or_path
__lowercase =RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
__lowercase =RagConfig.from_pretrained(_lowerCAmelCase )
__lowercase =AutoConfig.from_pretrained(_lowerCAmelCase )
__lowercase =AutoConfig.from_pretrained(_lowerCAmelCase )
__lowercase =gen_config
__lowercase =question_encoder_config
__lowercase =model_class.from_pretrained_question_encoder_generator(
_lowerCAmelCase , _lowerCAmelCase , config=_lowerCAmelCase )
rag_model.save_pretrained(_lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(_lowerCAmelCase )
# Save tokenizers.
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
__lowercase =AutoTokenizer.from_pretrained(_lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
lowerCamelCase = argparse.ArgumentParser()
parser.add_argument(
"""--model_type""",
choices=["""rag_sequence""", """rag_token"""],
required=True,
type=str,
help="""RAG model type: rag_sequence, rag_token""",
)
parser.add_argument("""--dest""", type=str, required=True, help="""Path to the output checkpoint directory.""")
parser.add_argument("""--generator_name_or_path""", type=str, required=True, help="""Generator model identifier""")
parser.add_argument(
"""--question_encoder_name_or_path""", type=str, required=True, help="""Question encoder model identifier"""
)
parser.add_argument(
"""--generator_tokenizer_name_or_path""",
type=str,
help="""Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``""",
)
parser.add_argument(
"""--question_encoder_tokenizer_name_or_path""",
type=str,
help="""Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``""",
)
parser.add_argument(
"""--config_name_or_path""",
type=str,
help=(
"""Identifier of the model config to use, if not provided, resolves to a base config for a given"""
""" ``model_type``"""
),
)
lowerCamelCase = parser.parse_args()
lowerCamelCase = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
    _A(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 48
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_a : Dict = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = ['BeitFeatureExtractor']
_a : Dict = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Tuple = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : List[str] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_a : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 44
|
"""simple docstring"""
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __A ( unittest.TestCase ):
def __A ( self ):
_lowerCAmelCase : Optional[int] = """ylacombe/bark-small"""
_lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
_lowerCAmelCase : int = """en_speaker_1"""
_lowerCAmelCase : List[Any] = """This is a test string"""
_lowerCAmelCase : Any = """speaker_embeddings_path.json"""
_lowerCAmelCase : List[Any] = """speaker_embeddings"""
def __A ( self , **a__ ):
return AutoTokenizer.from_pretrained(self.checkpoint , **a__ )
def __A ( self ):
shutil.rmtree(self.tmpdirname )
def __A ( self ):
_lowerCAmelCase : List[Any] = self.get_tokenizer()
_lowerCAmelCase : int = BarkProcessor(tokenizer=a__ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : str = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
_lowerCAmelCase : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : List[Any] = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token="""(BOS)""" , eos_token="""(EOS)""" , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def __A ( self ):
_lowerCAmelCase : List[str] = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
_lowerCAmelCase : Union[str, Any] = 35
_lowerCAmelCase : Union[str, Any] = 2
_lowerCAmelCase : Optional[int] = 8
_lowerCAmelCase : Dict = {
"""semantic_prompt""": np.ones(a__ ),
"""coarse_prompt""": np.ones((nb_codebooks_coarse, seq_len) ),
"""fine_prompt""": np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
_lowerCAmelCase : Dict = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Tuple = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from npz file
_lowerCAmelCase : List[Any] = os.path.join(self.tmpdirname , """file.npz""" )
np.savez(a__ , **a__ )
_lowerCAmelCase : List[Any] = processor(text=self.input_string , voice_preset=a__ )
_lowerCAmelCase : Optional[int] = inputs["""history_prompt"""]
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a__ , np.array([] ) ).tolist() )
# test loading voice preset from the hub
_lowerCAmelCase : str = processor(text=self.input_string , voice_preset=self.voice_preset )
def __A ( self ):
_lowerCAmelCase : int = self.get_tokenizer()
_lowerCAmelCase : List[Any] = BarkProcessor(tokenizer=a__ )
_lowerCAmelCase : Dict = processor(text=self.input_string )
_lowerCAmelCase : Tuple = tokenizer(
self.input_string , padding="""max_length""" , max_length=256 , add_special_tokens=a__ , return_attention_mask=a__ , return_token_type_ids=a__ , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 44
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase : Tuple = {
'configuration_clip': [
'CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'CLIPConfig',
'CLIPOnnxConfig',
'CLIPTextConfig',
'CLIPVisionConfig',
],
'processing_clip': ['CLIPProcessor'],
'tokenization_clip': ['CLIPTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = ['CLIPTokenizerFast']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Optional[Any] = ['CLIPFeatureExtractor']
_lowercase : Any = ['CLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[Any] = [
'CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'CLIPModel',
'CLIPPreTrainedModel',
'CLIPTextModel',
'CLIPTextModelWithProjection',
'CLIPVisionModel',
'CLIPVisionModelWithProjection',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : int = [
'TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCLIPModel',
'TFCLIPPreTrainedModel',
'TFCLIPTextModel',
'TFCLIPVisionModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : Tuple = [
'FlaxCLIPModel',
'FlaxCLIPPreTrainedModel',
'FlaxCLIPTextModel',
'FlaxCLIPTextPreTrainedModel',
'FlaxCLIPVisionModel',
'FlaxCLIPVisionPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 362
|
"""simple docstring"""
class _UpperCAmelCase :
def __init__( self : str , _lowercase : list ):
__UpperCAmelCase = set_counts
__UpperCAmelCase = max(_lowercase )
__UpperCAmelCase = len(_lowercase )
__UpperCAmelCase = [1] * num_sets
__UpperCAmelCase = list(range(_lowercase ) )
def a ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
__UpperCAmelCase = self.get_parent(_lowercase )
__UpperCAmelCase = self.get_parent(_lowercase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
__UpperCAmelCase = 0
__UpperCAmelCase = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
__UpperCAmelCase = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
__UpperCAmelCase = 0
__UpperCAmelCase = src_parent
__UpperCAmelCase = self.set_counts[src_parent]
__UpperCAmelCase = max(self.max_set , _lowercase )
return True
    def get_parent( self : Optional[Any] , _lowercase : int ):
if self.parents[disj_set] == disj_set:
return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set]
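# A minimal usage sketch for the union-by-rank structure above (hypothetical
# calls; names follow this file's obfuscated spelling):
#   uf = _UpperCAmelCase([1, 1, 1])   # three singleton sets, each of size 1
#   uf.a(0, 1)                        # union -> one set of size 2
#   uf.max_set                        # -> 2, the running maximum set size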
| 86
| 0
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowercase : Union[str, Any] = logging.get_logger(__name__)
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
__A : Tuple = ['''input_features''']
def __init__( self , lowercase=80 , lowercase=1_6000 , lowercase=160 , lowercase=30 , lowercase=400 , lowercase=0.0 , lowercase=False , **lowercase , ) -> Any:
'''simple docstring'''
super().__init__(
feature_size=lowercase , sampling_rate=lowercase , padding_value=lowercase , return_attention_mask=lowercase , **lowercase , )
a__ : Union[str, Any] = n_fft
a__ : Dict = hop_length
a__ : List[str] = chunk_length
a__ : str = chunk_length * sampling_rate
a__ : List[Any] = self.n_samples // hop_length
a__ : List[str] = sampling_rate
a__ : Any = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=lowercase , norm='slaney' , mel_scale='slaney' , )
def __lowercase ( self , lowercase) -> np.ndarray:
'''simple docstring'''
a__ : Union[str, Any] = spectrogram(
lowercase , window_function(self.n_fft , 'hann') , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='log10' , )
a__ : Optional[Any] = log_spec[:, :-1]
a__ : Optional[int] = np.maximum(lowercase , log_spec.max() - 8.0)
a__ : Optional[int] = (log_spec + 4.0) / 4.0
return log_spec
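    # The method above follows the Whisper log-mel recipe: take log10 of the mel
    # spectrogram, clamp values to at most 8 dB below the global maximum via
    # np.maximum(log_spec, log_spec.max() - 8.0), then rescale into roughly
    # [-1, 1] with (log_spec + 4.0) / 4.0.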
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __lowercase ( lowercase , lowercase , lowercase = 0.0) -> List[np.ndarray]:
'''simple docstring'''
if attention_mask is not None:
a__ : List[Any] = np.array(lowercase , np.intaa)
a__ : int = []
for vector, length in zip(lowercase , attention_mask.sum(-1)):
a__ : List[str] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
if length < normed_slice.shape[0]:
a__ : int = padding_value
normed_input_values.append(lowercase)
else:
a__ : Optional[Any] = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
return normed_input_values
def __call__( self , lowercase , lowercase = True , lowercase = None , lowercase = None , lowercase = None , lowercase = "max_length" , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
F' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
F' was sampled with {self.sampling_rate} and not {sampling_rate}.')
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
a__ : List[Any] = isinstance(lowercase , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}')
a__ : Optional[int] = is_batched_numpy or (
isinstance(lowercase , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
a__ : Optional[Any] = [np.asarray([speech] , dtype=np.floataa).T for speech in raw_speech]
elif not is_batched and not isinstance(lowercase , np.ndarray):
a__ : Optional[int] = np.asarray(lowercase , dtype=np.floataa)
elif isinstance(lowercase , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
a__ : List[Any] = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
a__ : List[Any] = [np.asarray([raw_speech]).T]
a__ : Optional[int] = BatchFeature({'input_features': raw_speech})
# convert into correct format for padding
a__ : Dict = self.pad(
lowercase , padding=lowercase , max_length=max_length if max_length else self.n_samples , truncation=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
a__ : List[str] = self.zero_mean_unit_var_norm(
padded_inputs['input_features'] , attention_mask=padded_inputs['attention_mask'] , padding_value=self.padding_value , )
a__ : Tuple = np.stack(padded_inputs['input_features'] , axis=0)
# make sure list is in array format
a__ : Tuple = padded_inputs.get('input_features').transpose(2 , 0 , 1)
a__ : Tuple = [self._np_extract_fbank_features(lowercase) for waveform in input_features[0]]
if isinstance(input_features[0] , lowercase):
a__ : Optional[Any] = [np.asarray(lowercase , dtype=np.floataa) for feature in input_features]
else:
a__ : int = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
a__ : List[Any] = padded_inputs['attention_mask'][:, :: self.hop_length]
if return_tensors is not None:
a__ : Tuple = padded_inputs.convert_to_tensors(lowercase)
return padded_inputs
def __lowercase ( self) -> Dict[str, Any]:
'''simple docstring'''
a__ : Tuple = copy.deepcopy(self.__dict__)
a__ : Optional[Any] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
| 99
|
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__ ( __UpperCAmelCase ):
"""simple docstring"""
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Dict = self.config_class(**self.inputs_dict)
self.parent.assertTrue(hasattr(lowercase , 'hidden_sizes'))
self.parent.assertTrue(hasattr(lowercase , 'num_attention_heads'))
self.parent.assertTrue(hasattr(lowercase , 'num_encoder_blocks'))
class A__ :
"""simple docstring"""
def __init__( self , lowercase , lowercase=13 , lowercase=64 , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[16, 32, 64, 128] , lowercase=[1, 4, 8, 16] , lowercase=[1, 2, 4, 8] , lowercase=True , lowercase=True , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=0.02 , lowercase=3 , lowercase=None , ) -> Tuple:
'''simple docstring'''
a__ : Optional[Any] = parent
a__ : int = batch_size
a__ : Tuple = image_size
a__ : Union[str, Any] = num_channels
a__ : str = num_encoder_blocks
a__ : Dict = sr_ratios
a__ : Dict = depths
a__ : Union[str, Any] = hidden_sizes
a__ : str = downsampling_rates
a__ : Tuple = num_attention_heads
a__ : Optional[Any] = is_training
a__ : Union[str, Any] = use_labels
a__ : Any = hidden_act
a__ : Optional[int] = hidden_dropout_prob
a__ : int = attention_probs_dropout_prob
a__ : Optional[Any] = initializer_range
a__ : Tuple = num_labels
a__ : Union[str, Any] = scope
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a__ : str = None
if self.use_labels:
a__ : Dict = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels)
a__ : Any = self.get_config()
return config, pixel_values, labels
def __lowercase ( self) -> Any:
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __lowercase ( self , lowercase , lowercase , lowercase) -> Dict:
'''simple docstring'''
a__ : Dict = SegformerModel(config=lowercase)
model.to(lowercase)
model.eval()
a__ : Optional[Any] = model(lowercase)
a__ : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def __lowercase ( self , lowercase , lowercase , lowercase) -> str:
'''simple docstring'''
a__ : Optional[Any] = self.num_labels
a__ : List[str] = SegformerForSemanticSegmentation(lowercase)
model.to(lowercase)
model.eval()
a__ : List[str] = model(lowercase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
a__ : int = model(lowercase , labels=lowercase)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4))
self.parent.assertGreater(result.loss , 0.0)
def __lowercase ( self , lowercase , lowercase , lowercase) -> Optional[int]:
'''simple docstring'''
a__ : Union[str, Any] = 1
a__ : Optional[int] = SegformerForSemanticSegmentation(config=lowercase)
model.to(lowercase)
model.eval()
a__ : Union[str, Any] = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size)).to(lowercase)
a__ : Optional[Any] = model(lowercase , labels=lowercase)
self.parent.assertGreater(result.loss , 0.0)
def __lowercase ( self) -> int:
'''simple docstring'''
a__ : Any = self.prepare_config_and_inputs()
a__ , a__ , a__ : str = config_and_inputs
a__ : Optional[int] = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
__A : Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
__A : List[str] = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
__A : List[str] = True
__A : Any = False
__A : Any = False
__A : str = False
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Union[str, Any] = SegformerModelTester(self)
a__ : Optional[Any] = SegformerConfigTester(self , config_class=lowercase)
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowercase ( self) -> Tuple:
'''simple docstring'''
a__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowercase)
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowercase)
@unittest.skip('SegFormer does not use inputs_embeds')
def __lowercase ( self) -> Tuple:
'''simple docstring'''
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods')
def __lowercase ( self) -> str:
'''simple docstring'''
pass
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = model_class(lowercase)
a__ : Dict = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ : Optional[int] = [*signature.parameters.keys()]
a__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , lowercase)
def __lowercase ( self) -> str:
'''simple docstring'''
a__ , a__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Tuple = True
for model_class in self.all_model_classes:
a__ : str = True
a__ : List[str] = False
a__ : int = True
a__ : List[Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : Optional[Any] = model(**self._prepare_for_class(lowercase , lowercase))
a__ : Optional[Any] = outputs.attentions
a__ : Dict = sum(self.model_tester.depths)
self.assertEqual(len(lowercase) , lowercase)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__ : Dict = True
a__ : int = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : Optional[int] = model(**self._prepare_for_class(lowercase , lowercase))
a__ : Optional[Any] = outputs.attentions
self.assertEqual(len(lowercase) , lowercase)
# verify the first attentions (first block, first layer)
a__ : Tuple = (self.model_tester.image_size // 4) ** 2
a__ : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
a__ : str = (self.model_tester.image_size // 32) ** 2
a__ : Optional[int] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:]) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
a__ : Dict = len(lowercase)
# Check attention is always last and order is fine
a__ : List[Any] = True
a__ : Any = True
a__ : Dict = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : int = model(**self._prepare_for_class(lowercase , lowercase))
self.assertEqual(out_len + 1 , len(lowercase))
a__ : int = outputs.attentions
self.assertEqual(len(lowercase) , lowercase)
# verify the first attentions (first block, first layer)
a__ : List[Any] = (self.model_tester.image_size // 4) ** 2
a__ : Union[str, Any] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def __lowercase ( self) -> List[Any]:
'''simple docstring'''
def check_hidden_states_output(lowercase , lowercase , lowercase):
a__ : Optional[Any] = model_class(lowercase)
model.to(lowercase)
model.eval()
with torch.no_grad():
a__ : int = model(**self._prepare_for_class(lowercase , lowercase))
a__ : Union[str, Any] = outputs.hidden_states
a__ : Any = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowercase) , lowercase)
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:]) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
a__ , a__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ : List[str] = True
check_hidden_states_output(lowercase , lowercase , lowercase)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ : int = True
check_hidden_states_output(lowercase , lowercase , lowercase)
def __lowercase ( self) -> Any:
'''simple docstring'''
if not self.model_tester.is_training:
return
a__ , a__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a__ : Tuple = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase):
continue
a__ : Dict = model_class(lowercase)
model.to(lowercase)
model.train()
a__ : str = self._prepare_for_class(lowercase , lowercase , return_labels=lowercase)
a__ : Optional[int] = model(**lowercase).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
pass
@slow
def __lowercase ( self) -> Tuple:
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ : Optional[Any] = SegformerModel.from_pretrained(lowercase)
self.assertIsNotNone(lowercase)
def A_ ( ) -> int:
a__ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class A__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def __lowercase ( self) -> Any:
'''simple docstring'''
a__ : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
a__ : int = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
lowercase)
a__ : Optional[int] = prepare_img()
a__ : Optional[int] = image_processor(images=lowercase , return_tensors='pt')
a__ : List[str] = encoded_inputs.pixel_values.to(lowercase)
with torch.no_grad():
a__ : Optional[int] = model(lowercase)
a__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , lowercase)
a__ : Dict = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-4))
@slow
def __lowercase ( self) -> Union[str, Any]:
'''simple docstring'''
a__ : Dict = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
a__ : List[str] = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024').to(lowercase)
a__ : Dict = prepare_img()
a__ : Optional[int] = image_processor(images=lowercase , return_tensors='pt')
a__ : List[str] = encoded_inputs.pixel_values.to(lowercase)
with torch.no_grad():
a__ : Optional[Any] = model(lowercase)
a__ : List[Any] = torch.Size((1, model.config.num_labels, 128, 128))
self.assertEqual(outputs.logits.shape , lowercase)
a__ : Optional[Any] = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
]).to(lowercase)
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowercase , atol=1e-1))
@slow
def __lowercase ( self) -> Dict:
'''simple docstring'''
a__ : List[str] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase , align=lowercase , do_random_crop=lowercase)
a__ : List[str] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512').to(
lowercase)
a__ : Any = prepare_img()
a__ : Optional[Any] = image_processor(images=lowercase , return_tensors='pt')
a__ : Optional[int] = encoded_inputs.pixel_values.to(lowercase)
with torch.no_grad():
a__ : Union[str, Any] = model(lowercase)
a__ : int = outputs.logits.detach().cpu()
a__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=lowercase , target_sizes=[(500, 300)])
a__ : Optional[Any] = torch.Size((500, 300))
self.assertEqual(segmentation[0].shape , lowercase)
a__ : Any = image_processor.post_process_semantic_segmentation(outputs=lowercase)
a__ : Union[str, Any] = torch.Size((128, 128))
self.assertEqual(segmentation[0].shape , lowercase)
| 99
| 1
|
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> Dict:
'''simple docstring'''
snake_case : List[Any] = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
snake_case : str = 128
elif "12-12" in model_name:
snake_case : int = 12
snake_case : str = 12
elif "14-14" in model_name:
snake_case : Optional[int] = 14
snake_case : Any = 14
elif "16-16" in model_name:
snake_case : int = 16
snake_case : int = 16
else:
raise ValueError('''Model not supported''' )
snake_case : List[Any] = '''huggingface/label-files'''
if "speech-commands" in model_name:
snake_case : Dict = 35
snake_case : Union[str, Any] = '''speech-commands-v2-id2label.json'''
else:
snake_case : Dict = 527
snake_case : Union[str, Any] = '''audioset-id2label.json'''
snake_case : Tuple = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case : Optional[Any] = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case : int = idalabel
snake_case : int = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> Dict:
'''simple docstring'''
if "module.v" in name:
snake_case : Union[str, Any] = name.replace('''module.v''' , '''audio_spectrogram_transformer''' )
if "cls_token" in name:
snake_case : Optional[Any] = name.replace('''cls_token''' , '''embeddings.cls_token''' )
if "dist_token" in name:
snake_case : List[str] = name.replace('''dist_token''' , '''embeddings.distillation_token''' )
if "pos_embed" in name:
snake_case : Any = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' )
if "patch_embed.proj" in name:
snake_case : Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
# transformer blocks
if "blocks" in name:
snake_case : Tuple = name.replace('''blocks''' , '''encoder.layer''' )
if "attn.proj" in name:
snake_case : Union[str, Any] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "attn" in name:
snake_case : List[str] = name.replace('''attn''' , '''attention.self''' )
if "norm1" in name:
snake_case : Dict = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name:
snake_case : Optional[int] = name.replace('''norm2''' , '''layernorm_after''' )
if "mlp.fc1" in name:
snake_case : Dict = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
snake_case : List[str] = name.replace('''mlp.fc2''' , '''output.dense''' )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
snake_case : List[Any] = name.replace('''audio_spectrogram_transformer.norm''' , '''audio_spectrogram_transformer.layernorm''' )
# classifier head
if "module.mlp_head.0" in name:
snake_case : List[Any] = name.replace('''module.mlp_head.0''' , '''classifier.layernorm''' )
if "module.mlp_head.1" in name:
snake_case : str = name.replace('''module.mlp_head.1''' , '''classifier.dense''' )
return name
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> List[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
snake_case : Any = orig_state_dict.pop(SCREAMING_SNAKE_CASE__ )
if "qkv" in key:
snake_case : int = key.split('''.''' )
snake_case : List[Any] = int(key_split[3] )
snake_case : Optional[Any] = config.hidden_size
if "weight" in key:
snake_case : Optional[Any] = val[:dim, :]
snake_case : List[Any] = val[dim : dim * 2, :]
snake_case : int = val[-dim:, :]
else:
snake_case : List[Any] = val[:dim]
snake_case : Union[str, Any] = val[dim : dim * 2]
snake_case : Dict = val[-dim:]
else:
snake_case : Tuple = val
return orig_state_dict
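# Sketch of the qkv split performed above (illustrative shapes only, not taken
# from the original checkpoint): a fused qkv weight of shape (3 * dim, dim) is
# cut into three (dim, dim) blocks for query, key and value.
#   import numpy as np
#   dim = 4
#   qkv = np.arange(3 * dim * dim).reshape(3 * dim, dim)
#   q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
#   assert q.shape == k.shape == v.shape == (dim, dim)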
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> List[Any]:
'''simple docstring'''
snake_case : List[Any] = [
'''module.v.head.weight''',
'''module.v.head.bias''',
'''module.v.head_dist.weight''',
'''module.v.head_dist.bias''',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ) -> Any:
'''simple docstring'''
snake_case : int = get_audio_spectrogram_transformer_config(SCREAMING_SNAKE_CASE__ )
snake_case : List[str] = {
'''ast-finetuned-audioset-10-10-0.4593''': (
'''https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.450''': (
'''https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448''': (
'''https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1'''
),
'''ast-finetuned-audioset-10-10-0.448-v2''': (
'''https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1'''
),
'''ast-finetuned-audioset-12-12-0.447''': (
'''https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1'''
),
'''ast-finetuned-audioset-14-14-0.443''': (
'''https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1'''
),
'''ast-finetuned-audioset-16-16-0.442''': (
'''https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1'''
),
'''ast-finetuned-speech-commands-v2''': (
'''https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1'''
),
}
# load original state_dict
snake_case : Optional[Any] = model_name_to_url[model_name]
snake_case : List[str] = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE__ , map_location='''cpu''' )
# remove some keys
remove_keys(SCREAMING_SNAKE_CASE__ )
# rename some keys
snake_case : Union[str, Any] = convert_state_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load 🤗 model
snake_case : Any = ASTForAudioClassification(SCREAMING_SNAKE_CASE__ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
snake_case : Dict = -4.267_7393 if '''speech-commands''' not in model_name else -6.84_5978
snake_case : int = 4.568_9974 if '''speech-commands''' not in model_name else 5.565_4526
snake_case : Union[str, Any] = 1024 if '''speech-commands''' not in model_name else 128
snake_case : List[Any] = ASTFeatureExtractor(mean=SCREAMING_SNAKE_CASE__ , std=SCREAMING_SNAKE_CASE__ , max_length=SCREAMING_SNAKE_CASE__ )
if "speech-commands" in model_name:
snake_case : Union[str, Any] = load_dataset('''speech_commands''' , '''v0.02''' , split='''validation''' )
snake_case : str = dataset[0]['''audio''']['''array''']
else:
snake_case : Dict = hf_hub_download(
repo_id='''nielsr/audio-spectogram-transformer-checkpoint''' , filename='''sample_audio.flac''' , repo_type='''dataset''' , )
snake_case ,snake_case : Optional[int] = torchaudio.load(SCREAMING_SNAKE_CASE__ )
snake_case : int = waveform.squeeze().numpy()
snake_case : List[Any] = feature_extractor(SCREAMING_SNAKE_CASE__ , sampling_rate=1_6000 , return_tensors='''pt''' )
# forward pass
snake_case : int = model(**SCREAMING_SNAKE_CASE__ )
snake_case : Any = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
snake_case : Union[str, Any] = torch.tensor([-0.8760, -7.0042, -8.6602] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
snake_case : str = torch.tensor([-1.1986, -7.0903, -8.2718] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
snake_case : Any = torch.tensor([-2.6128, -8.0080, -9.4344] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
snake_case : Dict = torch.tensor([-1.5080, -7.4534, -8.8917] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
snake_case : str = torch.tensor([-0.5050, -6.5833, -8.0843] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
snake_case : Dict = torch.tensor([-0.3826, -7.0336, -8.2413] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
snake_case : str = torch.tensor([-1.2113, -6.9101, -8.3470] )
elif model_name == "ast-finetuned-speech-commands-v2":
snake_case : Dict = torch.tensor([6.1589, -8.0566, -8.7984] )
else:
raise ValueError('''Unknown model name''' )
if not torch.allclose(logits[0, :3] , SCREAMING_SNAKE_CASE__ , atol=1E-4 ):
raise ValueError('''Logits don\'t match''' )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if push_to_hub:
print('''Pushing model and feature extractor to the hub...''' )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="ast-finetuned-audioset-10-10-0.4593",
type=str,
help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
lowercase__ = parser.parse_args()
    _UpperCamelCase(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 83
|
'''simple docstring'''
class snake_case__ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCamelCase__ : list[int] ) -> None:
"""simple docstring"""
snake_case : List[Any] = len(UpperCamelCase__ )
snake_case : Tuple = [0] * len_array
if len_array > 0:
snake_case : List[str] = array[0]
for i in range(1 , UpperCamelCase__ ):
snake_case : Tuple = self.prefix_sum[i - 1] + array[i]
def lowerCAmelCase ( self : str , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> int:
"""simple docstring"""
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def lowerCAmelCase ( self : str , UpperCamelCase__ : int ) -> bool:
"""simple docstring"""
snake_case : int = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(UpperCamelCase__ )
return False
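# A minimal usage sketch (illustrative values; note that both methods above
# carry the same obfuscated name, so the range-sum variant is shadowed):
#   ps = snake_case__([1, 2, 3, 4])   # prefix sums: [1, 3, 6, 10]
#   range sum over indices 1..3 is prefix[3] - prefix[0] = 10 - 1 = 9
#   the subarray-sum check returns True for target 5 (subarray [2, 3])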
if __name__ == "__main__":
import doctest
doctest.testmod()
| 83
| 1
|
"""simple docstring"""
from typing import List
import numpy as np
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :Optional[Any] = {key: len(A__ ) for key, value in gen_kwargs.items() if isinstance(A__, A__ )}
if len(set(lists_lengths.values() ) ) > 1:
raise RuntimeError(
(
"""Sharding is ambiguous for this dataset: """
+ """we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"""
+ """\n""".join(f"""\t- key {key} has length {length}""" for key, length in lists_lengths.items() )
+ """\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, """
+ """and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."""
) )
snake_case_ :Tuple = max(lists_lengths.values(), default=0 )
return max(1, A__ )
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Union[str, Any] = []
for group_idx in range(A__ ):
snake_case_ :List[str] = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
if num_shards_to_add == 0:
break
snake_case_ :List[str] = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
snake_case_ :Any = range(A__, start + num_shards_to_add )
shards_indices_per_group.append(A__ )
return shards_indices_per_group
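# Illustrative behaviour of the shard distribution above (hypothetical call;
# the obfuscated parameters correspond to num_shards and max_num_jobs):
#   _distribute_shards(num_shards=5, max_num_jobs=2) -> [range(0, 3), range(3, 5)]
# Earlier groups absorb the remainder shards first.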
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Optional[int] = _number_of_shards_in_gen_kwargs(A__ )
if num_shards == 1:
return [dict(A__ )]
else:
snake_case_ :Tuple = _distribute_shards(num_shards=A__, max_num_jobs=A__ )
return [
{
key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
if isinstance(A__, A__ )
else value
for key, value in gen_kwargs.items()
}
for group_idx in range(len(A__ ) )
]
def A_ ( _lowercase ):
'''simple docstring'''
return {
key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
if isinstance(gen_kwargs_list[0][key], A__ )
else gen_kwargs_list[0][key]
for key in gen_kwargs_list[0]
}
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :List[str] = {len(A__ ) for value in gen_kwargs.values() if isinstance(A__, A__ )}
snake_case_ :List[str] = {}
for size in list_sizes:
snake_case_ :Optional[int] = list(range(A__ ) )
rng.shuffle(indices_per_size[size] )
# Now let's copy the gen_kwargs and shuffle the lists based on their sizes
snake_case_ :int = dict(A__ )
for key, value in shuffled_kwargs.items():
if isinstance(A__, A__ ):
snake_case_ :int = [value[i] for i in indices_per_size[len(A__ )]]
return shuffled_kwargs
| 66
|
'''simple docstring'''
def UpperCamelCase_ ( A__ : int ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
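# Worked check of the inverse pentagonal formula above: P(n) = n(3n - 1) / 2,
# so n = (1 + sqrt(1 + 24 * P)) / 6. For P = 35: sqrt(1 + 840) = 29 and
# (1 + 29) / 6 = 5, an integer, hence 35 = P(5) is pentagonal.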
def UpperCamelCase_ ( A__ : int = 50_00 ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = [(i * (3 * i - 1)) // 2 for i in range(1 , A__ )]
for i, pentagonal_i in enumerate(A__ ):
for j in range(A__ , len(A__ ) ):
lowerCAmelCase_ : int = pentagonal_nums[j]
lowerCAmelCase_ : Union[str, Any] = pentagonal_i + pentagonal_j
lowerCAmelCase_ : List[Any] = pentagonal_j - pentagonal_i
if is_pentagonal(A__ ) and is_pentagonal(A__ ):
return b
return -1
if __name__ == "__main__":
print(F'''{solution() = }''')
| 120
| 0
|
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def __lowerCamelCase ( lowerCAmelCase__ ):
lowerCAmelCase__ = args.pruning_method
lowerCAmelCase__ = args.threshold
lowerCAmelCase__ = args.model_name_or_path.rstrip('/' )
lowerCAmelCase__ = args.target_model_path
print(F"""Load fine-pruned model from {model_name_or_path}""" )
lowerCAmelCase__ = torch.load(os.path.join(lowerCAmelCase__ , 'pytorch_model.bin' ) )
lowerCAmelCase__ = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
lowerCAmelCase__ = tensor
print(F"""Copied layer {name}""" )
elif "classifier" in name or "qa_output" in name:
lowerCAmelCase__ = tensor
print(F"""Copied layer {name}""" )
elif "bias" in name:
lowerCAmelCase__ = tensor
print(F"""Copied layer {name}""" )
else:
if pruning_method == "magnitude":
lowerCAmelCase__ = MagnitudeBinarizer.apply(inputs=lowerCAmelCase__ , threshold=lowerCAmelCase__ )
lowerCAmelCase__ = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
lowerCAmelCase__ = name[:-6]
lowerCAmelCase__ = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase__ = TopKBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
lowerCAmelCase__ = name[:-6]
lowerCAmelCase__ = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase__ = ThresholdBinarizer.apply(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
lowerCAmelCase__ = tensor * mask
print(F"""Pruned layer {name}""" )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
lowerCAmelCase__ = name[:-6]
lowerCAmelCase__ = model[F"""{prefix_}mask_scores"""]
lowerCAmelCase__ , lowerCAmelCase__ = -0.1, 1.1
lowerCAmelCase__ = torch.sigmoid(lowerCAmelCase__ )
lowerCAmelCase__ = s * (r - l) + l
lowerCAmelCase__ = s_bar.clamp(min=0.0 , max=1.0 )
lowerCAmelCase__ = tensor * mask
print(F"""Pruned layer {name}""" )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
lowerCAmelCase__ = os.path.join(
os.path.dirname(lowerCAmelCase__ ) , F"""bertarized_{os.path.basename(lowerCAmelCase__ )}""" )
if not os.path.isdir(lowerCAmelCase__ ):
shutil.copytree(lowerCAmelCase__ , lowerCAmelCase__ )
print(F"""\nCreated folder {target_model_path}""" )
torch.save(lowerCAmelCase__ , os.path.join(lowerCAmelCase__ , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
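# Note on the `l0` branch above: mask scores are mapped through a "stretched"
# sigmoid, s_bar = sigmoid(scores) * (r - l) + l with (l, r) = (-0.1, 1.1),
# then clamped to [0, 1]. This is the deterministic test-time mask of the
# hard-concrete distribution used for L0 regularization (Louizos et al., 2018).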
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
parser.add_argument(
'--pruning_method',
choices=['l0', 'magnitude', 'topK', 'sigmoied_threshold'],
type=str,
required=True,
help=(
'Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'
' sigmoied_threshold = Soft movement pruning)'
),
)
parser.add_argument(
'--threshold',
type=float,
required=False,
help=(
        'For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '
        'For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared. '
        'Not needed for `l0`.'
),
)
parser.add_argument(
'--model_name_or_path',
type=str,
required=True,
help='Folder containing the model that was previously fine-pruned',
)
parser.add_argument(
'--target_model_path',
default=None,
type=str,
required=False,
help='Folder containing the model that was previously fine-pruned',
)
lowerCAmelCase__ = parser.parse_args()
    __lowerCamelCase(args)
| 363
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCAmelCase__ = {
'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'],
'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'AdaptiveEmbedding',
'TransfoXLForSequenceClassification',
'TransfoXLLMHeadModel',
'TransfoXLModel',
'TransfoXLPreTrainedModel',
'load_tf_weights_in_transfo_xl',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAdaptiveEmbedding',
'TFTransfoXLForSequenceClassification',
'TFTransfoXLLMHeadModel',
'TFTransfoXLMainLayer',
'TFTransfoXLModel',
'TFTransfoXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
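# The _LazyModule used above defers heavy imports until an attribute is first
# accessed. A minimal stand-in illustrating the idea (not the actual
# transformers implementation; names here are illustrative):
import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # Import the owning submodule only on first attribute access.
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")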
| 119
| 0
|
def __a ( SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE , int ):
raise TypeError('''Input value must be a \'int\' type''' )
if SCREAMING_SNAKE_CASE < 0:
raise ValueError('''Input value must be a positive integer''' )
return bin(SCREAMING_SNAKE_CASE ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
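# Quick cross-check of the set-bit counter above: 25 is 0b11001, so it has
# three set bits. Kernighan's n & (n - 1) trick is an equivalent loop-based
# alternative, shown here for comparison.
def popcount_kernighan(n):
    count = 0
    while n:
        n &= n - 1  # clears the lowest set bit on each iteration
        count += 1
    return count

assert popcount_kernighan(25) == bin(25).count("1") == 3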
| 333
|
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Tuple = {
'b0': efficientnet.EfficientNetB0,
'b1': efficientnet.EfficientNetB1,
'b2': efficientnet.EfficientNetB2,
'b3': efficientnet.EfficientNetB3,
'b4': efficientnet.EfficientNetB4,
'b5': efficientnet.EfficientNetB5,
'b6': efficientnet.EfficientNetB6,
'b7': efficientnet.EfficientNetB7,
}
SCREAMING_SNAKE_CASE__ : Any = {
'b0': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.0,
'image_size': 224,
'dropout_rate': 0.2,
'dw_padding': [],
},
'b1': {
'hidden_dim': 1280,
'width_coef': 1.0,
'depth_coef': 1.1,
'image_size': 240,
'dropout_rate': 0.2,
'dw_padding': [16],
},
'b2': {
'hidden_dim': 1408,
'width_coef': 1.1,
'depth_coef': 1.2,
'image_size': 260,
'dropout_rate': 0.3,
'dw_padding': [5, 8, 16],
},
'b3': {
'hidden_dim': 1536,
'width_coef': 1.2,
'depth_coef': 1.4,
'image_size': 300,
'dropout_rate': 0.3,
'dw_padding': [5, 18],
},
'b4': {
'hidden_dim': 1792,
'width_coef': 1.4,
'depth_coef': 1.8,
'image_size': 380,
'dropout_rate': 0.4,
'dw_padding': [6],
},
'b5': {
'hidden_dim': 2048,
'width_coef': 1.6,
'depth_coef': 2.2,
'image_size': 456,
'dropout_rate': 0.4,
'dw_padding': [13, 27],
},
'b6': {
'hidden_dim': 2304,
'width_coef': 1.8,
'depth_coef': 2.6,
'image_size': 528,
'dropout_rate': 0.5,
'dw_padding': [31],
},
'b7': {
'hidden_dim': 2560,
'width_coef': 2.0,
'depth_coef': 3.1,
'image_size': 600,
'dropout_rate': 0.5,
'dw_padding': [18],
},
}
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : int = EfficientNetConfig()
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["hidden_dim"]
lowerCamelCase : List[str] = CONFIG_MAP[model_name]["width_coef"]
lowerCamelCase : Any = CONFIG_MAP[model_name]["depth_coef"]
lowerCamelCase : Union[str, Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = CONFIG_MAP[model_name]["dropout_rate"]
lowerCamelCase : str = CONFIG_MAP[model_name]["dw_padding"]
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "imagenet-1k-id2label.json"
lowerCamelCase : Any = 1000
lowerCamelCase : Any = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,repo_type="dataset" ) ,"r" ) )
lowerCamelCase : List[str] = {int(k ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Any = {v: k for k, v in idalabel.items()}
return config
def A ( ) -> int:
lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Tuple = Image.open(requests.get(_SCREAMING_SNAKE_CASE ,stream=_SCREAMING_SNAKE_CASE ).raw )
return im
def A ( _SCREAMING_SNAKE_CASE ) -> str:
lowerCamelCase : List[Any] = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : str = EfficientNetImageProcessor(
size={"height": size, "width": size} ,image_mean=[0.485, 0.456, 0.406] ,image_std=[0.47853944, 0.4732864, 0.47434163] ,do_center_crop=_SCREAMING_SNAKE_CASE ,)
return preprocessor
def A ( _SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
lowerCamelCase : Any = [v.split("_" )[0].split("block" )[1] for v in original_param_names if v.startswith("block" )]
lowerCamelCase : Any = sorted(set(_SCREAMING_SNAKE_CASE ) )
lowerCamelCase : Dict = len(_SCREAMING_SNAKE_CASE )
lowerCamelCase : List[Any] = {b: str(i ) for b, i in zip(_SCREAMING_SNAKE_CASE ,range(_SCREAMING_SNAKE_CASE ) )}
lowerCamelCase : List[Any] = []
rename_keys.append(("stem_conv/kernel:0", "embeddings.convolution.weight") )
rename_keys.append(("stem_bn/gamma:0", "embeddings.batchnorm.weight") )
rename_keys.append(("stem_bn/beta:0", "embeddings.batchnorm.bias") )
rename_keys.append(("stem_bn/moving_mean:0", "embeddings.batchnorm.running_mean") )
rename_keys.append(("stem_bn/moving_variance:0", "embeddings.batchnorm.running_var") )
for b in block_names:
lowerCamelCase : Dict = block_name_mapping[b]
rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') )
rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') )
rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') )
rename_keys.append(
(f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') )
rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') )
rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') )
rename_keys.append(
(f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') )
rename_keys.append(
(f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') )
rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') )
rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') )
rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') )
rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') )
rename_keys.append(
(f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') )
rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') )
rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') )
rename_keys.append(
(f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') )
rename_keys.append(("top_conv/kernel:0", "encoder.top_conv.weight") )
rename_keys.append(("top_bn/gamma:0", "encoder.top_bn.weight") )
rename_keys.append(("top_bn/beta:0", "encoder.top_bn.bias") )
rename_keys.append(("top_bn/moving_mean:0", "encoder.top_bn.running_mean") )
rename_keys.append(("top_bn/moving_variance:0", "encoder.top_bn.running_var") )
lowerCamelCase : Optional[int] = {}
for item in rename_keys:
if item[0] in original_param_names:
lowerCamelCase : List[str] = "efficientnet." + item[1]
lowerCamelCase : int = "classifier.weight"
lowerCamelCase : Union[str, Any] = "classifier.bias"
return key_mapping
def A ( hf_params ,tf_params ,key_mapping ) -> Dict:
for key, value in tf_params.items():
if "normalization" in key:
continue
hf_key = key_mapping[key]
if "_conv" in key and "kernel" in key:
new_hf_value = torch.from_numpy(value ).permute(3 ,2 ,0 ,1 )
elif "depthwise_kernel" in key:
new_hf_value = torch.from_numpy(value ).permute(2 ,3 ,0 ,1 )
elif "kernel" in key:
new_hf_value = torch.from_numpy(np.transpose(value ) )
else:
new_hf_value = torch.from_numpy(value )
# Replace HF parameters with original TF model parameters
assert hf_params[hf_key].shape == new_hf_value.shape
hf_params[hf_key].copy_(new_hf_value )
@torch.no_grad()
def A ( model_name ,pytorch_dump_folder_path ,save_model ,push_to_hub ) -> Optional[int]:
lowerCamelCase : Optional[int] = model_classes[model_name](
include_top=True ,weights="imagenet" ,input_tensor=None ,input_shape=None ,pooling=None ,classes=1000 ,classifier_activation="softmax" ,)
lowerCamelCase : List[Any] = original_model.trainable_variables
lowerCamelCase : Tuple = original_model.non_trainable_variables
lowerCamelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params}
for param in tf_non_train_params:
tf_params[param.name] = param.numpy()
lowerCamelCase : int = list(tf_params.keys() )
# Load HuggingFace model
lowerCamelCase : Union[str, Any] = get_efficientnet_config(_SCREAMING_SNAKE_CASE )
lowerCamelCase : Optional[int] = EfficientNetForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
lowerCamelCase : Tuple = hf_model.state_dict()
# Create src-to-dst parameter name mapping dictionary
print("Converting parameters..." )
lowerCamelCase : Union[str, Any] = rename_keys(_SCREAMING_SNAKE_CASE )
replace_params(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
# Initialize preprocessor and preprocess input image
lowerCamelCase : int = convert_image_processor(_SCREAMING_SNAKE_CASE )
lowerCamelCase : int = preprocessor(images=prepare_img() ,return_tensors="pt" )
# HF model inference
hf_model.eval()
with torch.no_grad():
lowerCamelCase : Optional[Any] = hf_model(**_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = outputs.logits.detach().numpy()
# Original model inference
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Any = CONFIG_MAP[model_name]["image_size"]
lowerCamelCase : Optional[int] = prepare_img().resize((image_size, image_size) ,resample=PIL.Image.NEAREST )
lowerCamelCase : Union[str, Any] = image.img_to_array(_SCREAMING_SNAKE_CASE )
lowerCamelCase : str = np.expand_dims(_SCREAMING_SNAKE_CASE ,axis=0 )
lowerCamelCase : Dict = original_model.predict(_SCREAMING_SNAKE_CASE )
# Check whether original and HF model outputs match -> np.allclose
assert np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ), "The predicted logits are not the same."
print("Model outputs match!" )
if save_model:
# Create folder to save model
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
os.mkdir(_SCREAMING_SNAKE_CASE )
# Save converted model and image processor
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
preprocessor.save_pretrained(_SCREAMING_SNAKE_CASE )
if push_to_hub:
# Push model and image processor to hub
print(f'''Pushing converted {model_name} to the hub...''' )
lowerCamelCase : int = f'''efficientnet-{model_name}'''
preprocessor.push_to_hub(_SCREAMING_SNAKE_CASE )
hf_model.push_to_hub(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='b0',
type=str,
help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='hf_model',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--save_model', action='store_true', help='Save model to local')
parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
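# The heart of replace_params above is re-ordering convolution kernels: Keras
# stores Conv2D weights as (H, W, C_in, C_out) while PyTorch expects
# (C_out, C_in, H, W). A minimal sketch with made-up shapes:
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32), dtype=np.float32)   # (H, W, C_in, C_out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert tuple(pt_kernel.shape) == (32, 16, 3, 3)           # (C_out, C_in, H, W)
# Depthwise kernels are stored as (H, W, C, multiplier) in Keras, hence the
# separate permute(2, 3, 0, 1) in the converter.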
| 48
| 0
|
'''simple docstring'''
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
__lowerCAmelCase = False
if is_vision_available():
from PIL import Image
from transformers import Pix2StructImageProcessor
class Pix2StructImageProcessingTester( unittest.TestCase ):
def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , size=None , do_normalize=True , do_convert_rgb=True , patch_size=None , ):
size = size if size is not None else {'height': 20, 'width': 20}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.size = size
self.do_normalize = do_normalize
self.do_convert_rgb = do_convert_rgb
self.max_patches = [512, 1024, 2048, 4096]
self.patch_size = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def prepare_image_processor_dict( self : List[Any] ):
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def prepare_dummy_image( self : List[Any] ):
_a : List[str] = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
_a : Any = Image.open(requests.get(_UpperCAmelCase ,stream=_UpperCAmelCase ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def setUp( self : List[Any] ):
self.image_processor_tester = Pix2StructImageProcessingTester(self )
@property
def image_processor_dict( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : str ):
_a : int = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase ,'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase ,'do_convert_rgb' ) )
def __lowercase ( self : Dict ):
_a : List[Any] = self.image_processor_tester.prepare_dummy_image()
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
_a : List[str] = 2048
_a : Dict = image_processor(_UpperCAmelCase ,return_tensors='pt' ,max_patches=_UpperCAmelCase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() ,torch.tensor(0.06_06 ) ,atol=1E-3 ,rtol=1E-3 ) )
def __lowercase ( self : Dict ):
# Initialize image_processor
_a : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,Image.Image )
# Test not batched input
_a : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : Union[str, Any] = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
_a : Union[str, Any] = image_processor(
_UpperCAmelCase ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def __lowercase ( self : int ):
# Initialize image_processor
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : int = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,Image.Image )
# Test not batched input
_a : Dict = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
_a : List[str] = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(_UpperCAmelCase ):
_a : str = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
_a : Union[str, Any] = 'Hello'
_a : str = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=_UpperCAmelCase ,header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
_a : Union[str, Any] = image_processor(
_UpperCAmelCase ,return_tensors='pt' ,max_patches=_UpperCAmelCase ,header_text=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def __lowercase ( self : int ):
# Initialize image_processor
_a : Dict = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_a : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ,numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,np.ndarray )
_a : Any = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : Dict = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
_a : str = image_processor(
_UpperCAmelCase ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def __lowercase ( self : Union[str, Any] ):
# Initialize image_processor
_a : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_a : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase ,torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,torch.Tensor )
# Test not batched input
_a : Union[str, Any] = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : Optional[int] = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
_a : Union[str, Any] = image_processor(
_UpperCAmelCase ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='`Pix2StructImageProcessor` requires `torch>=1.11.0`.' , )
@require_torch
@require_vision
class Pix2StructImageProcessingTestFourChannels( ImageProcessingSavingTestMixin , unittest.TestCase ):
image_processing_class = Pix2StructImageProcessor if is_vision_available() else None
def setUp( self : Tuple ):
self.image_processor_tester = Pix2StructImageProcessingTester(self ,num_channels=4 )
self.expected_encoded_image_num_channels = 3
@property
def image_processor_dict( self : Dict ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowercase ( self : Union[str, Any] ):
_a : List[Any] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase ,'do_normalize' ) )
self.assertTrue(hasattr(_UpperCAmelCase ,'do_convert_rgb' ) )
def __lowercase ( self : Tuple ):
# Initialize image_processor
_a : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_a : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase ,Image.Image )
# Test not batched input
_a : int = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
_a : Optional[int] = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
_a : Optional[Any] = image_processor(
_UpperCAmelCase ,return_tensors='pt' ,max_patches=_UpperCAmelCase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
| 107
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class __magic_name__ :
lowerCAmelCase : str = field(
metadata={'help': 'The output directory where the model will be written.'} , )
lowerCAmelCase : str = field(
metadata={
'help': (
'The encoder model checkpoint for weights initialization.'
'Don\'t set if you want to train an encoder model from scratch.'
)
} , )
lowerCAmelCase : str = field(
metadata={
'help': (
'The decoder model checkpoint for weights initialization.'
'Don\'t set if you want to train a decoder model from scratch.'
)
} , )
lowerCAmelCase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained encoder config name or path if not the same as encoder_model_name'} )
lowerCAmelCase : Optional[str] = field(
default=_UpperCamelCase , metadata={'help': 'Pretrained decoder config name or path if not the same as decoder_model_name'} )
def __lowerCamelCase ( ) -> Union[str, Any]:
_a : Any = HfArgumentParser((ModelArguments,) )
(model_args , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
_a : Optional[Any] = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
_a : Optional[Any] = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
_a : List[str] = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
_a : Optional[int] = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
_a : List[Any] = True
_a : int = True
_a : Any = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path , decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path , encoder_config=lowerCAmelCase_ , decoder_config=lowerCAmelCase_ , )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
_a : List[str] = decoder_config.decoder_start_token_id
_a : Optional[int] = decoder_config.pad_token_id
if decoder_start_token_id is None:
_a : Tuple = decoder_config.bos_token_id
if pad_token_id is None:
_a : List[Any] = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
_a : Any = decoder_config.eos_token_id
_a : Tuple = decoder_start_token_id
_a : Any = pad_token_id
_a : Dict = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
_a : Dict = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
_a : int = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
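# Example invocation of this gluing script (the script filename and output
# path are illustrative; the two checkpoint names are real Hub repositories):
# python create_flax_vision_encoder_decoder.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2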
| 107
| 1
|
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Optional[int] = KandinskyV22Pipeline
UpperCamelCase_ : Union[str, Any] = [
'image_embeds',
'negative_image_embeds',
]
UpperCamelCase_ : Any = ['image_embeds', 'negative_image_embeds']
UpperCamelCase_ : Optional[Any] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
UpperCamelCase_ : Optional[Any] = False
@property
def _lowerCAmelCase ( self : Optional[Any] ) -> Any:
"""simple docstring"""
return 3_2
@property
def _lowerCAmelCase ( self : Any ) -> Optional[Any]:
"""simple docstring"""
return 3_2
@property
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
return self.time_input_dim
@property
def _lowerCAmelCase ( self : List[Any] ) -> str:
"""simple docstring"""
return self.time_input_dim * 4
@property
def _lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
return 1_0_0
@property
def _lowerCAmelCase ( self : Dict ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Dict = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
_UpperCAmelCase : str = UNet2DConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def _lowerCAmelCase ( self : Tuple ) -> Tuple:
"""simple docstring"""
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def _lowerCAmelCase ( self : List[str] ) -> int:
"""simple docstring"""
torch.manual_seed(0 )
_UpperCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : Dict = self.dummy_unet
_UpperCAmelCase : Tuple = self.dummy_movq
_UpperCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type="epsilon" , thresholding=_SCREAMING_SNAKE_CASE , )
_UpperCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def _lowerCAmelCase ( self : Dict , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : str=0 ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith("mps" ):
_UpperCAmelCase : str = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def _lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Optional[Any] = 'cpu'
_UpperCAmelCase : int = self.get_dummy_components()
_UpperCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Optional[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : List[str] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
_UpperCAmelCase : List[str] = output.images
_UpperCAmelCase : Any = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0]
_UpperCAmelCase : List[str] = image[0, -3:, -3:, -1]
_UpperCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
_UpperCAmelCase : List[Any] = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCAmelCase ( self : int ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Tuple = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy" )
_UpperCAmelCase : Dict = KandinskyV22PriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.float16 )
pipe_prior.to(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = KandinskyV22Pipeline.from_pretrained(
"kandinsky-community/kandinsky-2-2-decoder" , torch_dtype=torch.float16 )
_UpperCAmelCase : Union[str, Any] = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Tuple = 'red cat, 4k photo'
_UpperCAmelCase : Union[str, Any] = torch.Generator(device="cuda" ).manual_seed(0 )
_UpperCAmelCase : Tuple = pipe_prior(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
_UpperCAmelCase : Optional[int] = torch.Generator(device="cuda" ).manual_seed(0 )
_UpperCAmelCase : str = pipeline(
image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=1_0_0 , output_type="np" , )
_UpperCAmelCase : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
| 145
|
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection , item , lo = 0 , hi = -1 ):
if hi < 0:
hi = len(sorted_collection )
while lo < hi:
mid = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
lo = mid + 1
else:
hi = mid
return lo
def bisect_right(sorted_collection , item , lo = 0 , hi = -1 ):
if hi < 0:
hi = len(sorted_collection )
while lo < hi:
mid = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
lo = mid + 1
else:
hi = mid
return lo
def insort_left(sorted_collection , item , lo = 0 , hi = -1 ):
sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right(sorted_collection , item , lo = 0 , hi = -1 ):
sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search(sorted_collection , item ):
left = 0
right = len(sorted_collection ) - 1
while left <= right:
midpoint = left + (right - left) // 2
current_item = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
right = midpoint - 1
else:
left = midpoint + 1
return None
def binary_search_std_lib(sorted_collection , item ):
index = bisect.bisect_left(sorted_collection , item )
if index != len(sorted_collection ) and sorted_collection[index] == item:
return index
return None
def binary_search_by_recursion(sorted_collection , item , left , right ):
if right < left:
return None
midpoint = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
else:
return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
user_input = input("""Enter numbers separated by comma:\n""").strip()
collection = sorted(int(item) for item in user_input.split(""","""))
target = int(input("""Enter a single number to be found in the list:\n"""))
result = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.')
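# Non-interactive usage of the helpers above:
nums = [0, 5, 7, 10, 15]
assert binary_search(nums, 5) == 1
assert binary_search(nums, 6) is None
assert binary_search_by_recursion(nums, 15, 0, len(nums) - 1) == 4
assert bisect_left(nums, 7) == 2  # insertion point that keeps the list sorted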
| 86
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
SCREAMING_SNAKE_CASE : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE : Optional[Any] = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
SCREAMING_SNAKE_CASE : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 367
|
"""simple docstring"""
from __future__ import annotations
import queue
class TreeNode :
def __init__(self , data ):
'''simple docstring'''
self.data = data
self.left = None
self.right = None
def build_tree( ) -> TreeNode:
"""simple docstring"""
print("""\n********Press N to stop entering at any point of time********\n""" )
check = input("""Enter the value of the root node: """ ).strip().lower()
q = queue.Queue()
tree_node = TreeNode(int(check ) )
q.put(tree_node )
while not q.empty():
node_found = q.get()
msg = F"""Enter the left node of {node_found.data}: """
check = input(msg ).strip().lower() or """n"""
if check == "n":
return tree_node
left_node = TreeNode(int(check ) )
node_found.left = left_node
q.put(left_node )
msg = F"""Enter the right node of {node_found.data}: """
check = input(msg ).strip().lower() or """n"""
if check == "n":
return tree_node
right_node = TreeNode(int(check ) )
node_found.right = right_node
q.put(right_node )
raise
def pre_order( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
print(node.data , end=""",""" )
pre_order(node.left )
pre_order(node.right )
def in_order( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
in_order(node.left )
print(node.data , end=""",""" )
in_order(node.right )
def post_order( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
post_order(node.left )
post_order(node.right )
print(node.data , end=""",""" )
def level_order( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
q = queue.Queue()
q.put(node )
while not q.empty():
node_dequeued = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
q.put(node_dequeued.left )
if node_dequeued.right:
q.put(node_dequeued.right )
def level_order_actual( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
q = queue.Queue()
q.put(node )
while not q.empty():
list_ = []
while not q.empty():
node_dequeued = q.get()
print(node_dequeued.data , end=""",""" )
if node_dequeued.left:
list_.append(node_dequeued.left )
if node_dequeued.right:
list_.append(node_dequeued.right )
print()
for node in list_:
q.put(node )
def pre_order_iter( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
stack = []
n = node
while n or stack:
while n: # start from root node, find its left child
print(n.data , end=""",""" )
stack.append(n )
n = n.left
# end of while means current node doesn't have left child
n = stack.pop()
# start to traverse its right child
n = n.right
def in_order_iter( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
stack = []
n = node
while n or stack:
while n:
stack.append(n )
n = n.left
n = stack.pop()
print(n.data , end=""",""" )
n = n.right
def post_order_iter( node : TreeNode ) -> None:
"""simple docstring"""
if not isinstance(node , TreeNode ) or not node:
return
stack1 , stack2 = [], []
n = node
stack1.append(n )
while stack1: # to find the reversed order of post order, store it in stack2
n = stack1.pop()
if n.left:
stack1.append(n.left )
if n.right:
stack1.append(n.right )
stack2.append(n )
while stack2: # pop up from stack2 will be the post order
print(stack2.pop().data , end=""",""" )
def prompt( s : str = "" , width : int = 50 , char : str = "*" ) -> str:
"""simple docstring"""
if not s:
return "\n" + width * char
left , extra = divmod(width - len(s ) - 2 , 2 )
return F"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
node = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 5_0 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
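# A non-interactive smoke test for the traversals above: build the three-node
# tree 2 <- 1 -> 3 by hand instead of via build_tree()'s input() prompts.
root = TreeNode(1)
root.left = TreeNode(2)
root.right = TreeNode(3)
pre_order(root)   # prints: 1,2,3,
in_order(root)    # prints: 2,1,3,
post_order(root)  # prints: 2,3,1,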
| 317
| 0
|
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Union[str, Any] ):
'''simple docstring'''
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = FlaxStableDiffusionPipeline.from_pretrained(
'stabilityai/stable-diffusion-2' ,revision='bf16' ,dtype=jnp.bfloat16 ,)
_UpperCamelCase : Dict = 'A painting of a squirrel eating a burger'
_UpperCamelCase : Any = jax.device_count()
_UpperCamelCase : Any = num_samples * [prompt]
_UpperCamelCase : Tuple = sd_pipe.prepare_inputs(lowerCamelCase__ )
_UpperCamelCase : List[Any] = replicate(lowerCamelCase__ )
_UpperCamelCase : Optional[int] = shard(lowerCamelCase__ )
_UpperCamelCase : str = jax.random.PRNGKey(0 )
_UpperCamelCase : int = jax.random.split(lowerCamelCase__ ,jax.device_count() )
_UpperCamelCase : List[Any] = sd_pipe(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,num_inference_steps=25 ,jit=lowerCamelCase__ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_UpperCamelCase : int = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCamelCase : List[str] = images[0, 253:256, 253:256, -1]
_UpperCamelCase : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCamelCase : Dict = jnp.array([0.4_2_3_8, 0.4_4_1_4, 0.4_3_9_5, 0.4_4_5_3, 0.4_6_2_9, 0.4_5_9_0, 0.4_5_3_1, 0.4_5_5_0_8, 0.4_5_1_2] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self : List[str] ):
'''simple docstring'''
_UpperCamelCase : Optional[int] = 'stabilityai/stable-diffusion-2'
_UpperCamelCase , _UpperCamelCase : Tuple = FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCamelCase__ ,subfolder='scheduler' )
_UpperCamelCase , _UpperCamelCase : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
lowerCamelCase__ ,scheduler=lowerCamelCase__ ,revision='bf16' ,dtype=jnp.bfloat16 ,)
params['scheduler'] = scheduler_params
_UpperCamelCase : str = 'A painting of a squirrel eating a burger'
_UpperCamelCase : Optional[int] = jax.device_count()
_UpperCamelCase : Optional[int] = num_samples * [prompt]
_UpperCamelCase : Dict = sd_pipe.prepare_inputs(lowerCamelCase__ )
_UpperCamelCase : Dict = replicate(lowerCamelCase__ )
_UpperCamelCase : Dict = shard(lowerCamelCase__ )
_UpperCamelCase : Tuple = jax.random.PRNGKey(0 )
_UpperCamelCase : int = jax.random.split(lowerCamelCase__ ,jax.device_count() )
_UpperCamelCase : Optional[Any] = sd_pipe(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,num_inference_steps=25 ,jit=lowerCamelCase__ )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
_UpperCamelCase : List[Any] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
_UpperCamelCase : Any = images[0, 253:256, 253:256, -1]
_UpperCamelCase : Dict = jnp.asarray(jax.device_get(image_slice.flatten() ) )
_UpperCamelCase : List[str] = jnp.array([0.4_3_3_6, 0.4_2_9_6_9, 0.4_4_5_3, 0.4_1_9_9, 0.4_2_9_7, 0.4_5_3_1, 0.4_4_3_4, 0.4_4_3_4, 0.4_2_9_7] )
print(F'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
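# Sketch of the replicate/shard pattern these tests rely on: pipeline weights
# are copied to every device, while inputs gain a leading device axis so the
# jitted call runs one slice per device under pmap. Shapes are illustrative.
import jax
import numpy as np

n_devices = jax.device_count()
token_ids = np.zeros((n_devices * 2, 77), dtype=np.int32)  # 2 prompts per device
sharded = token_ids.reshape(n_devices, 2, 77)              # add the device axis
rngs = jax.random.split(jax.random.PRNGKey(0), n_devices)  # one RNG per device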
| 83
|
'''simple docstring'''
def A__ ( UpperCAmelCase_ ):
if num < 0:
return False
_UpperCamelCase : int = num
_UpperCamelCase : int = 0
while num > 0:
_UpperCamelCase : str = rev_num * 1_0 + (num % 1_0)
num //= 1_0
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
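# Cross-check of the digit-reversal loop above using string slicing; both
# approaches agree on which numbers read the same forwards and backwards.
def is_palindrome_str(n):
    return n >= 0 and str(n) == str(n)[::-1]

assert is_palindrome_str(121) and not is_palindrome_str(123)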
| 83
| 1
|
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
model_type = """M-CLIP"""
def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
self.transformerDimensions = transformerDimSize
self.numDims = imageDimSize
super().__init__(**kwargs )
class MultilingualCLIP( PreTrainedModel ):
config_class = MCLIPConfig
def __init__( self , config , *args , **kwargs ):
super().__init__(config , *args , **kwargs )
self.transformer = XLMRobertaModel(config )
self.LinearTransformation = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def _lowerCamelCase ( self , input_ids , attention_mask ):
embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
embs = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(embs ), embs
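# The forward pass above mean-pools token embeddings with the attention mask
# so padding positions do not contribute. A standalone sketch of that pooling:
import torch

embs = torch.randn(2, 5, 8)             # (batch, seq_len, hidden)
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 1]])  # 1 = real token, 0 = padding
pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 8)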
| 351
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
UpperCamelCase__ = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 87
| 0
|
'''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def __UpperCamelCase ( UpperCAmelCase ):
if not isinstance(snake_case__ , int ):
raise TypeError('''Undefined for non-integers''' )
elif precision < 1:
raise ValueError('''Undefined for non-natural numbers''' )
lowercase__ : Any = precision
lowercase__ : Optional[int] = ceil(precision / 14 )
lowercase__ : Union[str, Any] = 42_6880 * Decimal(1_0005 ).sqrt()
lowercase__ : int = 1
lowercase__ : Union[str, Any] = 1359_1409
lowercase__ : Dict = Decimal(snake_case__ )
for k in range(1 , snake_case__ ):
lowercase__ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(k ) ** 3)
linear_term += 5_4514_0134
exponential_term *= -26_2537_4126_4076_8000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
__a: Dict = 50
print(F'The first {n} digits of pi is: {pi(n)}')
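# The series implemented above is the Chudnovsky formula; each term adds
# roughly 14 correct digits, which is why the code runs ceil(precision / 14)
# iterations. In LaTeX:
#
#   \frac{1}{\pi} = 12 \sum_{k=0}^{\infty}
#       \frac{(-1)^k (6k)! \, (13591409 + 545140134 k)}{(3k)! \, (k!)^3 \, 640320^{3k + 3/2}}
#
# The constant 426880 * sqrt(10005) in the code equals 640320^{3/2} / 12, and
# -262537412640768000 is -640320^3, which folds the (-1)^k into the
# exponential term.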
| 198
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__UpperCAmelCase = {
'''configuration_transfo_xl''': ['''TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TransfoXLConfig'''],
'''tokenization_transfo_xl''': ['''TransfoXLCorpus''', '''TransfoXLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''AdaptiveEmbedding''',
'''TransfoXLForSequenceClassification''',
'''TransfoXLLMHeadModel''',
'''TransfoXLModel''',
'''TransfoXLPreTrainedModel''',
'''load_tf_weights_in_transfo_xl''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = [
'''TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFAdaptiveEmbedding''',
'''TFTransfoXLForSequenceClassification''',
'''TFTransfoXLLMHeadModel''',
'''TFTransfoXLMainLayer''',
'''TFTransfoXLModel''',
'''TFTransfoXLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
__UpperCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 119
| 0
|
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
lowercase__ = True
except (ImportError, ModuleNotFoundError):
lowercase__ = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ ) -> str:
SCREAMING_SNAKE_CASE__ = re.sub('''<n>''' , '''''' , SCREAMING_SNAKE_CASE__ ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(SCREAMING_SNAKE_CASE__ ) )
| 368
|
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class snake_case__ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
lowerCamelCase = ["""image_processor""", """tokenizer"""]
lowerCamelCase = """CLIPImageProcessor"""
lowerCamelCase = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : Optional[int] , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : str=None , **UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case : int = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCamelCase__ , )
snake_case : Optional[Any] = kwargs.pop('''feature_extractor''' )
snake_case : Any = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self : Dict , UpperCamelCase__ : str=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : List[str]=None , **UpperCamelCase__ : Any ) -> Any:
"""simple docstring"""
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
snake_case : List[str] = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if images is not None:
snake_case : List[Any] = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
snake_case : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def lowerCAmelCase ( self : Any , *UpperCamelCase__ : Optional[int] , **UpperCamelCase__ : Optional[int] ) -> Any:
"""simple docstring"""
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def lowerCAmelCase ( self : Union[str, Any] , *UpperCamelCase__ : str , **UpperCamelCase__ : Any ) -> List[Any]:
"""simple docstring"""
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def lowerCAmelCase ( self : int ) -> str:
"""simple docstring"""
snake_case : int = self.tokenizer.model_input_names
snake_case : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 83
| 0
|
# Function to print upper half of diamond (pyramid)
def __magic_name__ ( A : Dict ):
'''simple docstring'''
for i in range(0, A ):
for _ in range(0, A - i - 1 ): # printing spaces
print(" ", end="" )
for _ in range(0, i + 1 ): # printing stars
print("* ", end="" )
print()
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
for i in range(A, 0, -1 ):
for _ in range(i, 0, -1 ): # printing stars
print("* ", end="" )
print()
for _ in range(A - i + 1, 0, -1 ): # printing spaces
print(" ", end="" )
def __magic_name__ ( A : List[Any] ):
'''simple docstring'''
if n <= 0:
print(" ... .... nothing printing :(" )
return
floyd(A ) # upper half
reverse_floyd(A ) # lower half
if __name__ == "__main__":
print(r'| /\ | |- | |- |--| |\ /| |-')
print(r'|/ \| |- |_ |_ |__| | \/ | |_')
__lowerCAmelCase : Dict = 1
while K:
__lowerCAmelCase : List[str] = int(input('enter the number and , and see the magic : '))
print()
pretty_print(user_number)
__lowerCAmelCase : int = int(input('press 0 to exit... and 1 to continue...'))
print('Good Bye...')
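# For reference, pretty_print(3) draws (given the loop fixes above):
#
#   *
#  * *
# * * *
# * * *
#  * *
#   *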
| 107
|
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class snake_case__ :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = field(
metadata={"""help""": """The output directory where the model will be written."""} , )
SCREAMING_SNAKE_CASE_ : str = field(
metadata={
"""help""": (
"""The encoder model checkpoint for weights initialization."""
"""Don't set if you want to train an encoder model from scratch."""
)
} , )
SCREAMING_SNAKE_CASE_ : str = field(
metadata={
"""help""": (
"""The decoder model checkpoint for weights initialization."""
"""Don't set if you want to train a decoder model from scratch."""
)
} , )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained encoder config name or path if not the same as encoder_model_name"""} )
SCREAMING_SNAKE_CASE_ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained decoder config name or path if not the same as decoder_model_name"""} )
def __magic_name__ ( ):
'''simple docstring'''
a = HfArgumentParser((ModelArguments,) )
(model_args , ) = parser.parse_args_into_dataclasses()
# Load pretrained model and tokenizer
# Use explicit specified encoder config
if model_args.encoder_config_name:
a = AutoConfig.from_pretrained(model_args.encoder_config_name )
# Use pretrained encoder model's config
else:
a = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path )
# Use explicit specified decoder config
if model_args.decoder_config_name:
a = AutoConfig.from_pretrained(model_args.decoder_config_name )
# Use pretrained decoder model's config
else:
a = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path )
# necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
a = True
a = True
a = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path, decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path, encoder_config=A, decoder_config=A, )
# GPT2 only has bos/eos tokens but not decoder_start/pad tokens
a = decoder_config.decoder_start_token_id
a = decoder_config.pad_token_id
if decoder_start_token_id is None:
a = decoder_config.bos_token_id
if pad_token_id is None:
a = decoder_config.eos_token_id
# This is necessary to make Flax's generate() work
a = decoder_config.eos_token_id
a = decoder_start_token_id
a = pad_token_id
a = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path )
a = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path )
a = tokenizer.convert_ids_to_tokens(model.config.pad_token_id )
model.save_pretrained(model_args.output_dir )
image_processor.save_pretrained(model_args.output_dir )
tokenizer.save_pretrained(model_args.output_dir )
if __name__ == "__main__":
main()
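# Example invocation (checkpoints and output path are illustrative assumptions):
# python create_model_from_encoder_decoder_models.py \
#     --output_dir ./vit-gpt2 \
#     --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#     --decoder_model_name_or_path gpt2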
| 107
| 1
|
'''simple docstring'''
def calc_profit(profit: list, weight: list, max_weight: int) -> float:
    """Greedy fractional knapsack: return the maximum profit for `max_weight`."""
    if len(profit) != len(weight):
        raise ValueError("The length of profit and weight must be same.")
    if max_weight <= 0:
        raise ValueError("max_weight must be greater than zero.")
    if any(p < 0 for p in profit):
        raise ValueError("Profit can not be negative.")
    if any(w < 0 for w in weight):
        raise ValueError("Weight can not be negative.")

    # List created to store the profit gained per 1 kg for each item,
    # respectively. Calculate and append profit/weight for each element.
    profit_by_weight = [p / w for p, w in zip(profit, weight)]

    # Creating a copy of the list, sorted by profit/weight in ascending order
    sorted_profit_by_weight = sorted(profit_by_weight)

    # declaring useful variables
    length = len(sorted_profit_by_weight)
    limit = 0
    gain = 0
    i = 0

    # loop until the total weight reaches the max limit (e.g. 15 kg) and i < length
    while limit <= max_weight and i < length:
        # pick the greatest remaining element of sorted_profit_by_weight
        biggest_profit_by_weight = sorted_profit_by_weight[length - i - 1]
        index = profit_by_weight.index(biggest_profit_by_weight)
        profit_by_weight[index] = -1  # mark this item as used

        # check if the weight encountered still fits within the remaining
        # capacity.
        if max_weight - limit >= weight[index]:
            limit += weight[index]
            # Adding the profit gained for the whole item:
            # weight[index] / weight[index] == 1
            gain += 1 * profit[index]
        else:
            # Since the weight encountered is greater than the remaining capacity,
            # take only the required number of remaining kgs and compute the
            # proportional profit: weight remaining / weight[index]
            gain += (max_weight - limit) / weight[index] * profit[index]
            break
        i += 1
    return gain
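# Worked example (hypothetical numbers): profits [10, 9, 8], weights [5, 4, 3],
# max_weight 7. profit/weight is [2.0, 2.25, 2.67]; the greedy takes the 3 kg item
# (gain 8, limit 3), then the whole 4 kg item (gain 17, limit 7); the 5 kg item no
# longer fits, so calc_profit([10, 9, 8], [5, 4, 3], 7) returns 17.0.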
if __name__ == "__main__":
print(
"""Input profits, weights, and then max_weight (all positive ints) separated by """
"""spaces."""
)
    profit = [int(x) for x in input("Input profits separated by spaces: ").split()]
    weight = [int(x) for x in input("Input weights separated by spaces: ").split()]
    max_weight = int(input("Max weight allowed: "))
# Function Call
calc_profit(profit, weight, max_weight)
| 361
|
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True,
        )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1)

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]), [self.model_tester.seq_length, self.model_tester.hidden_size])

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length])

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length])

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 229
| 0
|
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig(SageMakerConfig):
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
    fail_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class SageMakerLaunch(unittest.TestCase):
    def test_args_convert(self):
        # `_convert_nargs_to_dict` should parse the nargs-style script args into typed values.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
        assert isinstance(converted_args["model_name_or_path"], str)
        assert isinstance(converted_args["do_train"], bool)
        assert isinstance(converted_args["epochs"], int)
        assert isinstance(converted_args["learning_rate"], float)
        assert isinstance(converted_args["max_steps"], float)

        with pytest.raises(ValueError):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args)
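# Expected result of the successful conversion above (values inferred from the
# assertions; shown here only as an illustration of the parsed types):
# _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args)
# -> {"model_name_or_path": "bert", "do_train": False, "epochs": 3,
#     "learning_rate": 5e-05, "max_steps": 50.5}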
| 53
|
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
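# Instantiation sketch (a minimal example; with the defaults above, the
# no-argument call mirrors the google/efficientnet-b7 configuration):
# config = EfficientNetConfig()
# config.num_hidden_layers  # -> sum([1, 2, 2, 3, 3, 4, 1]) * 4 == 64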
| 317
| 0
|
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name, save_dir, **config_kwargs):
    """Save a randomly initialized seq2seq model built from a pretrained config."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
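# Example invocation via fire (the script name, checkpoint, and output path are
# illustrative assumptions; positional args map to config_name/save_dir, and
# extra --flags become config kwargs):
# python save_randomly_initialized_model.py t5-small ./t5-random --d_model 64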
| 124
|
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)


class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
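# Migration sketch (the checkpoint name is illustrative): since the deprecated
# class is now a thin alias, the image processor is the drop-in replacement:
# old = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
# new = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")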
| 124
| 1
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 9
|
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]):
    """Construct denser atom positions (14 dimensions instead of 37)."""
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []

    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])

    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)

    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device
    )
    protein_aatype = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()

    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()

    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1

    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask

    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]):
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
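# Minimal usage sketch (a shape check only; the aatype tensor below is an
# arbitrary assumption of 8 residues, all restype 0):
# protein = {"aatype": torch.zeros(8, dtype=torch.long)}
# protein = make_atom14_masks(protein)
# protein["residx_atom14_to_atom37"].shape  # -> torch.Size([8, 14])
# protein["atom37_atom_exists"].shape       # -> torch.Size([8, 37])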
| 87
| 0
|
import os
from tempfile import TemporaryDirectory
from unittest import TestCase
import pytest
from absl.testing import parameterized
from datasets import config
from datasets.arrow_reader import HF_GCP_BASE_URL
from datasets.builder import DatasetBuilder
from datasets.dataset_dict import IterableDatasetDict
from datasets.iterable_dataset import IterableDataset
from datasets.load import dataset_module_factory, import_main_class
from datasets.utils.file_utils import cached_path
DATASETS_ON_HF_GCP = [
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.de'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.en'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.fr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.frr'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.it'''},
{'''dataset''': '''wikipedia''', '''config_name''': '''20220301.simple'''},
{'''dataset''': '''snli''', '''config_name''': '''plain_text'''},
{'''dataset''': '''eli5''', '''config_name''': '''LFQA_reddit'''},
{'''dataset''': '''wiki40b''', '''config_name''': '''en'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.compressed'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.nq.no_index'''},
{'''dataset''': '''wiki_dpr''', '''config_name''': '''psgs_w100.multiset.no_index'''},
{'''dataset''': '''natural_questions''', '''config_name''': '''default'''},
]
def list_datasets_on_hf_gcp_parameters(with_config=True):
if with_config:
return [
{
"testcase_name": d["dataset"] + "/" + d["config_name"],
"dataset": d["dataset"],
"config_name": d["config_name"],
}
for d in DATASETS_ON_HF_GCP
]
else:
return [
{"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP}
]
@parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=True))
class TestDatasetOnHfGcp(TestCase):
    dataset = None
    config_name = None

    def test_dataset_info_available(self, dataset, config_name):
        with TemporaryDirectory() as tmp_dir:
            dataset_module = dataset_module_factory(dataset, cache_dir=tmp_dir)
            builder_cls = import_main_class(dataset_module.module_path, dataset=True)
            builder_instance = builder_cls(
                cache_dir=tmp_dir, config_name=config_name, hash=dataset_module.hash,
            )
            dataset_info_url = "/".join(
                [
                    HF_GCP_BASE_URL,
                    builder_instance._relative_data_dir(with_hash=False).replace(os.sep, "/"),
                    config.DATASET_INFO_FILENAME,
                ])
            datset_info_path = cached_path(dataset_info_url, cache_dir=tmp_dir)
            self.assertTrue(os.path.exists(datset_info_path))
@pytest.mark.integration
def test_as_dataset_from_hf_gcs(tmp_path_factory):
    tmp_dir = tmp_path_factory.mktemp("test_hf_gcp") / "test_wikipedia_simple"
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_dir)
    builder_cls = import_main_class(dataset_module.module_path)
    builder_instance = builder_cls(
        cache_dir=tmp_dir, config_name="20220301.frr", hash=dataset_module.hash,
    )
    # use the HF cloud storage, not the original download_and_prepare that uses apache-beam
    builder_instance._download_and_prepare = None
    builder_instance.download_and_prepare()
    ds = builder_instance.as_dataset()
    assert ds
@pytest.mark.integration
def test_as_streaming_dataset_from_hf_gcs(tmp_path):
    dataset_module = dataset_module_factory("wikipedia", cache_dir=tmp_path)
    builder_cls = import_main_class(dataset_module.module_path, dataset=True)
    builder_instance = builder_cls(
        cache_dir=tmp_path, config_name="20220301.frr", hash=dataset_module.hash,
    )
    ds = builder_instance.as_streaming_dataset()
    assert ds
    assert isinstance(ds, IterableDatasetDict)
    assert "train" in ds
    assert isinstance(ds["train"], IterableDataset)
    assert next(iter(ds["train"]))
| 304
|
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "embed_dim"))
        self.parent.assertTrue(hasattr(config, "num_heads"))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
    @unittest.skip(reason="Cvt does not output attentions")
    def test_attention_outputs(self):
        pass

    @unittest.skip(reason="Cvt does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="Cvt does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_layers = len(self.model_tester.depth)
            self.assertEqual(len(hidden_states), expected_num_layers)

            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:]), [
                    self.model_tester.embed_dim[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ], )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = CvtModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 304
| 1
|
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index

            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
                # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})

                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))

        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)

            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})

        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2})
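# Usage sketch for the utilities under test (the folder path is illustrative):
# offload_state_dict("/tmp/offload", model.state_dict()) writes one .dat file per
# tensor plus an index.json describing shapes/dtypes; OffloadedWeightsLoader then
# resolves keys lazily, from memory when given a state_dict and from disk otherwise.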
| 38
|
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below n."""
    a = 3
    result = 0
    while a < n:
        # a % 15 == 0 implies a % 3 == 0, so no separate branch is needed
        if a % 3 == 0 or a % 5 == 0:
            result += a
        a += 1
    return result
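# A constant-time alternative (a sketch using inclusion-exclusion over arithmetic
# series; not part of the original solution): the sum of multiples of k below n
# is k * m * (m + 1) // 2 with m = (n - 1) // k.
def solution_closed_form(n: int = 1000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return tri(3) + tri(5) - tri(15)  # multiples of 15 are counted twice above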
if __name__ == "__main__":
print(F"""{solution() = }""")
| 83
| 0
|
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCamelCase ( unittest.TestCase ):
def a_ ( self) -> List[str]:
snake_case_ = tempfile.mkdtemp()
snake_case_ = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
snake_case_ = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
snake_case_ = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48145466, 0.4578275, 0.40821073],
'image_std': [0.26862954, 0.26130258, 0.27577711],
}
snake_case_ = os.path.join(self.tmpdirname, lowerCAmelCase__)
with open(self.image_processor_file, 'w', encoding='utf-8') as fp:
json.dump(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Tuple:
return BertTokenizer.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> Tuple:
return BertTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self, **lowerCAmelCase__) -> List[Any]:
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname, **lowerCAmelCase__)
def a_ ( self) -> Optional[int]:
shutil.rmtree(self.tmpdirname)
def a_ ( self) -> str:
snake_case_ = [np.random.randint(255, size=(3, 30, 400), dtype=np.uinta)]
snake_case_ = [Image.fromarray(np.moveaxis(lowerCAmelCase__, 0, -1)) for x in image_inputs]
return image_inputs
def a_ ( self) -> Any:
snake_case_ = self.get_tokenizer()
snake_case_ = self.get_rust_tokenizer()
snake_case_ = self.get_image_processor()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_slow.save_pretrained(self.tmpdirname)
snake_case_ = AlignProcessor.from_pretrained(self.tmpdirname, use_fast=lowerCAmelCase__)
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
processor_fast.save_pretrained(self.tmpdirname)
snake_case_ = AlignProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
self.assertIsInstance(processor_slow.tokenizer, lowerCAmelCase__)
self.assertIsInstance(processor_fast.tokenizer, lowerCAmelCase__)
self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
self.assertIsInstance(processor_slow.image_processor, lowerCAmelCase__)
self.assertIsInstance(processor_fast.image_processor, lowerCAmelCase__)
def a_ ( self) -> Union[str, Any]:
snake_case_ = AlignProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
processor.save_pretrained(self.tmpdirname)
snake_case_ = self.get_tokenizer(bos_token='(BOS)', eos_token='(EOS)')
snake_case_ = self.get_image_processor(do_normalize=lowerCAmelCase__, padding_value=1.0)
snake_case_ = AlignProcessor.from_pretrained(
self.tmpdirname, bos_token='(BOS)', eos_token='(EOS)', do_normalize=lowerCAmelCase__, padding_value=1.0)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, lowerCAmelCase__)
self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
self.assertIsInstance(processor.image_processor, lowerCAmelCase__)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = self.prepare_image_inputs()
snake_case_ = image_processor(lowerCAmelCase__, return_tensors='np')
snake_case_ = processor(images=lowerCAmelCase__, return_tensors='np')
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)
def a_ ( self) -> Optional[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = processor(text=lowerCAmelCase__)
snake_case_ = tokenizer(lowerCAmelCase__, padding='max_length', max_length=64)
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def a_ ( self) -> Any:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'])
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase__):
processor()
def a_ ( self) -> Optional[Any]:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case_ = processor.batch_decode(lowerCAmelCase__)
snake_case_ = tokenizer.batch_decode(lowerCAmelCase__)
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = self.get_image_processor()
snake_case_ = self.get_tokenizer()
snake_case_ = AlignProcessor(tokenizer=lowerCAmelCase__, image_processor=lowerCAmelCase__)
snake_case_ = 'lower newer'
snake_case_ = self.prepare_image_inputs()
snake_case_ = processor(text=lowerCAmelCase__, images=lowerCAmelCase__)
self.assertListEqual(list(inputs.keys()), processor.model_input_names)
| 312
|
"""simple docstring"""
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCamelCase = get_tests_dir('''fixtures/spiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer
SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def a_ ( self) -> int:
super().setUp()
# We have a SentencePiece fixture for testing
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>')
tokenizer.save_pretrained(self.tmpdirname)
def a_ ( self, lowerCAmelCase__) -> Any:
snake_case_ = 'this is a test'
snake_case_ = 'this is a test'
return input_text, output_text
def a_ ( self) -> Optional[int]:
snake_case_ = '<pad>'
snake_case_ = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__)
def a_ ( self) -> Tuple:
snake_case_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0], '<pad>')
self.assertEqual(vocab_keys[1], '<unk>')
self.assertEqual(vocab_keys[-1], '[PAD]')
self.assertEqual(len(lowerCAmelCase__), 3_0001)
def a_ ( self) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 3_0000)
def a_ ( self) -> List[str]:
# fmt: off
snake_case_ = ' \tHeLLo!how \n Are yoU? '
snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?']
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> str:
pass
@unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.')
def a_ ( self) -> List[Any]:
pass
def a_ ( self) -> str:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> List[Any]:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Dict:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
def a_ ( self) -> Tuple:
# fmt: off
snake_case_ = 'I was born in 92000, and this is falsé.'
snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ]
# fmt: on
snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__)
snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__))
self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = ' \tHeLLo!how \n Are yoU? '
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = "I was born in 92000, and this is falsé."
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaVaTokenizerFast(SAMPLE_VOCAB, keep_accents=True)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaVaTokenizer(SAMPLE_VOCAB)
        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id], encoded_pair, )
@slow
def a_ ( self) -> Union[str, Any]:
# fmt: off
snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
| 312
| 1
|
from __future__ import annotations
import numpy as np
def lower_upper_decomposition(table: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Doolittle LU decomposition: factor a square matrix into lower and upper triangular matrices."""
    rows, columns = np.shape(table)
    if rows != columns:
        msg = (
            "'table' has to be of square shaped array but got a "
            f"{rows}x{columns} array:\n{table}"
        )
        raise ValueError(msg)
    lower = np.zeros((rows, columns))
    upper = np.zeros((rows, columns))
    for i in range(columns):
        for j in range(i):
            total = sum(lower[i][k] * upper[k][j] for k in range(j))
            if upper[j][j] == 0:
                raise ArithmeticError("No LU decomposition exists")
            lower[i][j] = (table[i][j] - total) / upper[j][j]
        lower[i][i] = 1
        for j in range(i, columns):
            total = sum(lower[i][k] * upper[k][j] for k in range(i))
            upper[i][j] = table[i][j] - total
    return lower, upper
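# Quick sanity check (plain comment so doctest.testmod() below is unaffected):
#     matrix = np.array([[2, -2, 1], [0, 1, 2], [5, 3, 1]])
#     lower, upper = lower_upper_decomposition(matrix)
#     np.allclose(lower @ upper, matrix)  # -> True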
if __name__ == "__main__":
import doctest
doctest.testmod()
| 6
|
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"
# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """ResNet embeddings (stem) composed of a single aggressive convolution."""
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels
    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """ResNet shortcut, used to project residual features to the correct size and optionally downsample."""
    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """A classic ResNet residual layer composed of two 3x3 convolutions."""
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """A ResNet bottleneck layer: a 1x1 reduction conv, a 3x3 conv, and a 1x1 expansion conv."""
    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]
    def forward(self, hidden_state: Tensor) -> Tensor:
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """A ResNet stage composed of stacked layers."""
    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )
    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))
    def forward(self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """An abstract class to handle weights initialization and pretrained-model download/loading."""
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True
    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
RESNET_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(self, pixel_values: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output)
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None) -> BackboneOutput:
        r"""
        Returns:
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values)
        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
| 229
| 0
|
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''),
('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
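# Example mapping produced by rename_backbone_keys:
#   "backbone.0.body.conv1.weight" -> "backbone.conv_encoder.model.conv1.weight"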
def read_in_q_k_v(state_dict):
    prefix = ""
    # the hidden size is 256, so each of q, k, v takes a 256-row slice of the fused projection
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight")
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1_000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image
def normalize(image):
    image = F.to_tensor(image)
    # ImageNet mean/std statistics
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location='cpu')
    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = 'model.'
    for key in state_dict.copy().keys():
        if not key.startswith('class_labels_classifier') and not key.startswith('bbox_predictor'):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val
    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone='resnet18', mask_loss_coefficient=1, dice_loss_coefficient=1, ce_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.4, class_cost=1, bbox_cost=5, giou_cost=2, )
    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: 'table', 1: 'table rotated'}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: 'table',
            1: 'table column',
            2: 'table row',
            3: 'table column header',
            4: 'table projected row header',
            5: 'table spanning cell',
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    image_processor = DetrImageProcessor(
        format='coco_detection', max_size=800 if 'detection' in checkpoint_url else 1_000)
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    # verify our conversion
    filename = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png'
    file_path = hf_hub_download(repo_id='nielsr/example-pdf', repo_type='dataset', filename=filename)
    image = Image.open(file_path).convert('RGB')
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)
    outputs = model(pixel_values)
    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]] )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]] )
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]] )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]] )
    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1E-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1E-4)
    print('Looks ok!')
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model to HF hub
        logger.info('Pushing model to the hub...')
        model_name = (
            'microsoft/table-transformer-detection'
            if 'detection' in checkpoint_url
            else 'microsoft/table-transformer-structure-recognition'
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
type=str,
choices=[
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''',
'''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''',
],
help='''URL of the Table Transformer checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 216
|
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'''vocab_file''': '''vocab.json''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
'''merges_file''': '''merges.txt''',
}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'''
),
},
'''tokenizer_config_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'''
),
},
'''merges_file''': {
'''facebook/s2t-wav2vec2-large-en-de''': (
'''https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'''
),
},
}
BPE_TOKEN_MERGES = '''</w>'''
BPE_TOKEN_VOCAB = '''@@ '''
def get_pairs(word):
    """Return a set of adjacent symbol pairs in a word, where a word is a tuple of symbols."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
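# e.g. get_pairs(("h", "e", "l", "l", "o</w>")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o</w>")}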
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''facebook/s2t-wav2vec2-large-en-de''': 1_024}
class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file, bos_token="<s>", pad_token="<pad>", eos_token="</s>", unk_token="<unk>", do_lower_case=False, merges_file=None, **kwargs, ):
        super().__init__(
            unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, do_lower_case=do_lower_case, **kwargs, )
        self.do_lower_case = do_lower_case
        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding='utf-8') as merges_handle:
                merges = merges_handle.read().split('\n')[:-1]
            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.decoder)
    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # repeatedly apply the lowest-ranked (most frequent) merge until none applies
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        if word == "\n " + BPE_TOKEN_MERGES:
            word = '\n' + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, '')
        word = word.replace(' ', BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word
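    # Note: self.cache memoizes fully merged words, so repeated tokens after the first
    # occurrence skip the merge loop entirely.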
    def _tokenize(self, text):
        if self.bpe_ranks is None:
            raise ValueError(
                'This tokenizer was instantiated without a `merges.txt` file, so'
                ' that it can only be used for decoding, not for encoding.'
                'Make sure to provide `merges.txt` file at instantiation to enable '
                'encoding.')
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(' ')))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        result = self.decoder.get(index, self.unk_token)
        return result
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        string = ' '.join(tokens)
        # make sure @@ tokens are concatenated
        string = ''.join(string.split(BPE_TOKEN_VOCAB))
        return string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        merges_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file, 'w', encoding='utf-8') as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        ' Please check that the tokenizer is not corrupted!')
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1
        return (vocab_file, merges_file)
| 216
| 1
|
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpt2 import GPT2Tokenizer
class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)
    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)
    @classmethod
    def from_config(cls, config):
        return cls(**config)
    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }
    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)
        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length
            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id )
        return {"attention_mask": attention_mask, "input_ids": input_ids}
| 124
|
from jiwer import compute_measures
import datasets
_CITATION = '\\n@inproceedings{inproceedings,\n author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n year = {2004},\n month = {01},\n pages = {},\n title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n'
_DESCRIPTION = '\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n'
_KWARGS_DESCRIPTION = '\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = ["this is the prediction", "there is an other sample"]\n    >>> references = ["this is the reference", "there is another one"]\n    >>> wer = datasets.load_metric("wer")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Value("""string""", id="""sequence"""),
                } ),
            codebase_urls=["""https://github.com/jitsi/jiwer/"""],
            reference_urls=[
                """https://en.wikipedia.org/wiki/Word_error_rate""",
            ], )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            # accumulate error counts pair by pair instead of concatenating all texts at once
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
| 124
| 1
|
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 363
|
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive decimal integer to its representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("""int() can't convert non-string with explicit base""")
    if num < 0:
        raise ValueError("""parameter must be positive int""")
    if isinstance(base, str):
        raise TypeError("""'str' object cannot be interpreted as an integer""")
    if isinstance(base, float):
        raise TypeError("""'float' object cannot be interpreted as an integer""")
    if base in (0, 1):
        raise ValueError("""base must be >= 2""")
    if base > 36:
        raise ValueError("""base must be <= 36""")
    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 map to the letters A-Z
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])
    return new_value[::-1]
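# e.g. decimal_to_any(255, 16) == "FF" and decimal_to_any(9, 2) == "1001"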
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 49
| 0
|
'''simple docstring'''
import math
from datetime import datetime, timedelta
def gauss_easter(year: int) -> datetime:
    """Calculate the date of Easter for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7
    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30
    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7
    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday))
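# Known reference point: gauss_easter(2000) -> datetime(2000, 4, 23), Western Easter Sunday in 2000.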
if __name__ == "__main__":
for year in (1_994, 2_000, 2_010, 2_021, 2_023):
    tense = 'will be' if year > datetime.now().year else 'was'
print(f'''Easter in {year} {tense} {gauss_easter(year)}''')
| 304
|
'''simple docstring'''
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self) -> None:
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, '''embed_dim'''))
        self.parent.assertTrue(hasattr(config, '''num_heads'''))
class CvtModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        num_channels=3,
        embed_dim=[16, 48, 96],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        stride_kv=[2, 2, 2],
        cls_token=[False, False, True],
        attention_drop_rate=[0.0, 0.0, 0.0],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        is_training=True,
        use_labels=True,
        num_labels=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
        return CvtConfig(
            image_size=self.image_size, num_labels=self.num_labels, num_channels=self.num_channels, embed_dim=self.embed_dim, num_heads=self.num_heads, patch_sizes=self.patch_sizes, patch_padding=self.patch_padding, patch_stride=self.patch_stride, stride_kv=self.stride_kv, depth=self.depth, cls_token=self.cls_token, attention_drop_rate=self.attention_drop_rate, initializer_range=self.initializer_range, )
    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class CvtModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": CvtModel, "image-classification": CvtForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def A ( self : int ) -> List[str]:
UpperCAmelCase_ : Optional[int] = CvtModelTester(self )
UpperCAmelCase_ : List[Any] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=37 )
def A ( self : Any ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A ( self : int ) -> List[str]:
return
@unittest.skip(reason='''Cvt does not output attentions''' )
def A ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip(reason='''Cvt does not use inputs_embeds''' )
def A ( self : Any ) -> Optional[Any]:
pass
@unittest.skip(reason='''Cvt does not support input and output embeddings''' )
def A ( self : List[Any] ) -> Any:
pass
def A ( self : int ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Tuple = model_class(_A )
UpperCAmelCase_ : Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : Tuple = [*signature.parameters.keys()]
UpperCAmelCase_ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _A )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def A ( self : Dict ) -> List[str]:
def check_hidden_states_output(_A : Dict , _A : str , _A : int ):
UpperCAmelCase_ : str = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
UpperCAmelCase_ : Union[str, Any] = model(**self._prepare_for_class(_A , _A ) )
UpperCAmelCase_ : Optional[Any] = outputs.hidden_states
UpperCAmelCase_ : Any = len(self.model_tester.depth )
self.assertEqual(len(_A ) , _A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ : Dict = True
check_hidden_states_output(_A , _A , _A )
def A ( self : Union[str, Any] ) -> List[str]:
UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def A ( self : List[Any] ) -> Optional[Any]:
pass
@slow
def A ( self : Optional[int] ) -> int:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Optional[Any] = CvtModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class CvtModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])

    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 304
| 1
|
"""
Demonstrates an implementation of the SHA-1 hash function in a Python class and
provides utilities to compute the hash of a string or of a file's contents.
"""
import argparse
import hashlib  # hashlib is only used for verification inside the test function
import struct
class SHA1Hash:
    """
    Class to contain the entire pipeline for SHA-1 hashing.
    """

    def __init__(self, data):
        self.data = data
        self.h = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]

    @staticmethod
    def rotate(n, b):
        # left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFFFFFF

    def padding(self):
        # append 0x80, zero-fill, then pack the original bit length as a
        # big-endian 64-bit integer so the result is a multiple of 64 bytes
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        # split the padded data into 64-byte (512-bit) blocks
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        # expand one 16-word block into the 80-word message schedule
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A827999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1BBCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFFFFFF,
                self.h[1] + b & 0xFFFFFFFF,
                self.h[2] + c & 0xFFFFFFFF,
                self.h[3] + d & 0xFFFFFFFF,
                self.h[4] + e & 0xFFFFFFFF,
            )
        return ("{:08x}" * 5).format(*self.h)
def test_sha1_hash():
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main():
    """
    Provides the option to take a string or a file as input and prints the
    computed SHA-1 hash.
    """
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest

    doctest.testmod()
| 370
|
"""
Project Euler Problem 87: https://projecteuler.net/problem=87

Count the numbers below the limit that can be expressed as the sum of a prime
square, a prime cube, and a prime fourth power.
"""


def solution(limit: int = 50000000) -> int:
    """
    Return the number of integers less than limit that can be expressed as the
    sum of a prime square, a prime cube, and a prime fourth power.
    """
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    # sieve of Eratosthenes: strike out composite odd numbers
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
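
# Hedged sanity check (added): the Project Euler 87 statement gives exactly
# four such numbers below fifty (28, 33, 47 and 49), which pins down the
# expected result for a small limit.
def test_solution_small_limit() -> None:
    assert solution(50) == 4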
if __name__ == "__main__":
    print(f"{solution() = }")
| 164
| 0
|
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """
    Apply the logistic sigmoid function element-wise: 1 / (1 + exp(-x)).
    """
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """
    Apply the sigmoid linear unit (SiLU, also called swish): x * sigmoid(x).
    """
    return vector * sigmoid(vector)
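
# Hedged usage sketch (added): SiLU behaves like the identity for large
# positive inputs and squashes large negative inputs towards zero.
def _silu_demo() -> None:
    x = np.array([-6.0, 0.0, 6.0])
    print(sigmoid_linear_unit(x))  # approximately [-0.0148, 0.0, 5.9852]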
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 312
|
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True)
class AudioClassification(TaskTemplate):
    task: str = field(default="audio-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"audio": Audio()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    audio_column: str = "audio"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.audio_column: "audio",
            self.label_column: "labels",
        }
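
# Hedged usage sketch (added; not part of the original module): aligning the
# template with a dataset's features replaces the placeholder ClassLabel with
# the dataset's own label set.
def _align_demo() -> None:
    features = Features({"audio": Audio(), "labels": ClassLabel(names=["cat", "dog"])})
    template = AudioClassification().align_with_features(features)
    print(template.label_schema["labels"].names)  # ['cat', 'dog']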
| 312
| 1
|
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 361
|
import doctest
from collections import deque
import numpy as np
class CircularConvolution:
    """
    Stores two signals and performs their circular convolution via the
    circulant-matrix method.
    """

    def __init__(self) -> None:
        self.first_signal = [2, 1, 2, -1]
        self.second_signal = [1, 2, 3, 4]

    def circular_convolution(self) -> list[float]:
        """
        Perform the circular convolution of the first and second signals.

        >>> convolution = CircularConvolution()
        >>> convolution.circular_convolution()
        [10, 10, 6, 14]
        """
        length_first_signal = len(self.first_signal)
        length_second_signal = len(self.second_signal)

        max_length = max(length_first_signal, length_second_signal)

        # create a zero matrix of max_length x max_length
        matrix = [[0] * max_length for i in range(max_length)]

        # fills the smaller signal with zeros to make both signals of same length
        if length_first_signal < length_second_signal:
            self.first_signal += [0] * (max_length - length_first_signal)
        elif length_first_signal > length_second_signal:
            self.second_signal += [0] * (max_length - length_second_signal)

        # each row of the circulant matrix is the second signal rotated by i
        for i in range(max_length):
            rotated_signal = deque(self.second_signal)
            rotated_signal.rotate(i)
            for j, item in enumerate(rotated_signal):
                matrix[i][j] += item

        # multiply the matrix with the first signal
        final_signal = np.matmul(np.transpose(matrix), np.transpose(self.first_signal))

        # rounding-off to two decimal places
        return [round(i, 2) for i in final_signal]
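
# Hedged cross-check (added; not part of the original class): the matrix
# construction above is equivalent to the textbook definition
# y[k] = sum_m x1[m] * x2[(k - m) mod n].
def _circular_convolution_reference(x1: list, x2: list) -> list:
    n = max(len(x1), len(x2))
    x1 = x1 + [0] * (n - len(x1))
    x2 = x2 + [0] * (n - len(x2))
    return [sum(x1[m] * x2[(k - m) % n] for m in range(n)) for k in range(n)]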
if __name__ == "__main__":
    doctest.testmod()
| 169
| 0
|
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
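
# Hedged example (added; not part of the original script): the parser above
# expects a pytest summary line such as "== 1 failed, 2 passed in 3.50s ==".
def _handle_test_results_demo() -> None:
    assert handle_test_results("== 1 failed, 2 passed in 3.50s ==") == (1, 2, "3.50s")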
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self.thread_ts = None

        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)

    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )

    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}
def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
| 216
|
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("img_encoder.pos_embed", "vision_model.embeddings.position_embeddings")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("img_encoder.patch_embed.norm", "vision_model.embeddings.layernorm")
    if "img_encoder.layers" in name:
        name = name.replace("img_encoder.layers", "vision_model.encoder.stages")
    if "blocks" in name and "res" not in name:
        name = name.replace("blocks", "layers")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("attn", "self_attn")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("proj", "out_proj")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("pre_assign_attn.attn.proj", "pre_assign_attn.attn.out_proj")
    if "norm1" in name:
        name = name.replace("norm1", "layer_norm1")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("norm2", "layer_norm2")
    if "img_encoder.norm" in name:
        name = name.replace("img_encoder.norm", "vision_model.layernorm")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("text_encoder.token_embedding", "text_model.embeddings.token_embedding")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("text_encoder.positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("text_encoder.transformer.resblocks.", "text_model.encoder.layers.")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if "text_encoder" in name:
        name = name.replace("text_encoder", "text_model")
    if "ln_final" in name:
        name = name.replace("ln_final", "final_layer_norm")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("img_projector.linear_hidden.", "visual_projection.")
    if "img_projector.linear_out." in name:
        name = name.replace("img_projector.linear_out.", "visual_projection.3.")
    if "text_projector.linear_hidden" in name:
        name = name.replace("text_projector.linear_hidden", "text_projection")
    if "text_projector.linear_out" in name:
        name = name.replace("text_projector.linear_out", "text_projection.3")

    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f"vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split(".")
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f"text_model.encoder.layers.{layer_num}.self_attn"
            if "weight" in key:
                orig_state_dict[f"{prefix}.q_proj.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.k_proj.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.v_proj.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.q_proj.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.k_proj.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.v_proj.bias"] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
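
# Hedged illustration (added; not part of the original script): the qkv split
# above slices one fused (3 * dim, dim) projection into three (dim, dim)
# matrices for the query, key and value projections.
def _qkv_split_demo() -> None:
    dim = 4
    qkv = torch.arange(3 * dim * dim, dtype=torch.float32).reshape(3 * dim, dim)
    q, k, v = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)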
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = prepare_img()
    inputs = processor(text=["a photo of a cat", "a photo of a dog"], images=image, padding=True, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(f"Model name {model_name} not supported.")
    assert torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3)

    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("Successfully saved processor and model to", pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        processor.push_to_hub(model_name, organization="nielsr")
        model.push_to_hub(model_name, organization="nielsr")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
    parser.add_argument(
        "--model_name",
        default="groupvit-gcc-yfcc",
        type=str,
        help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
    )
    args = parser.parse_args()

    convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 216
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot": [
        "BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotConfig",
        "BlenderbotOnnxConfig",
    ],
    "tokenization_blenderbot": ["BlenderbotTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_fast"] = ["BlenderbotTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot"] = [
        "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotForCausalLM",
        "BlenderbotForConditionalGeneration",
        "BlenderbotModel",
        "BlenderbotPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot"] = [
        "TFBlenderbotForConditionalGeneration",
        "TFBlenderbotModel",
        "TFBlenderbotPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot"] = [
        "FlaxBlenderbotForConditionalGeneration",
        "FlaxBlenderbotModel",
        "FlaxBlenderbotPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
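
# Hedged note (added): with the _LazyModule pattern above, an import such as
#   from transformers.models.blenderbot import BlenderbotConfig
# resolves the attribute lazily, so the heavy torch/tf/flax submodules are
# only imported when the corresponding class is first accessed.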
| 355
|
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)
def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")
            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
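
# Hedged illustration (added; not part of the original script): the regex
# rename above maps T5X-style layer names to HF-style ones.
def _layer_rename_demo() -> None:
    assert re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_3.mlp") == "encoder.layer.3.mlp"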
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tok = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tok)

    if use_large:
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Use large model.")

    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
| 93
| 0
|
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_xmod": [
        "XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XmodConfig",
        "XmodOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xmod"] = [
        "XMOD_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XmodForCausalLM",
        "XmodForMaskedLM",
        "XmodForMultipleChoice",
        "XmodForQuestionAnswering",
        "XmodForSequenceClassification",
        "XmodForTokenClassification",
        "XmodModel",
        "XmodPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xmod import (
XMOD_PRETRAINED_MODEL_ARCHIVE_LIST,
XmodForCausalLM,
XmodForMaskedLM,
XmodForMultipleChoice,
XmodForQuestionAnswering,
XmodForSequenceClassification,
XmodForTokenClassification,
XmodModel,
XmodPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 170
|
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
| 49
| 0
|
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings(idx):
    embed = []
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight''',
f'''stage{idx}.patch_embed.proj.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias''',
f'''stage{idx}.patch_embed.proj.bias''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight''',
f'''stage{idx}.patch_embed.norm.weight''',
) )
embed.append(
(
f'''cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias''',
f'''stage{idx}.patch_embed.norm.bias''',
) )
return embed
def attention(idx, cnt):
    attention_weights = []
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked''',
f'''stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_q.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_k.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj_v.bias''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight''',
f'''stage{idx}.blocks.{cnt}.attn.proj.weight''',
) )
attention_weights.append(
(
f'''cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias''',
f'''stage{idx}.blocks.{cnt}.attn.proj.bias''',
) )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias''', f'''stage{idx}.blocks.{cnt}.mlp.fc2.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight''', f'''stage{idx}.blocks.{cnt}.norm1.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias''', f'''stage{idx}.blocks.{cnt}.norm1.bias''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight''', f'''stage{idx}.blocks.{cnt}.norm2.weight''') )
attention_weights.append(
(f'''cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias''', f'''stage{idx}.blocks.{cnt}.norm2.bias''') )
return attention_weights
def cls_token(idx):
    token = []
token.append((f'''cvt.encoder.stages.{idx}.cls_token''', '''stage2.cls_token''') )
return token
def final():
    head = []
head.append(('''layernorm.weight''', '''norm.weight''') )
head.append(('''layernorm.bias''', '''norm.bias''') )
head.append(('''classifier.weight''', '''head.weight''') )
head.append(('''classifier.bias''', '''head.bias''') )
return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
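
# Hedged illustration (added; not part of the original script): the depth
# dispatch above keys on two characters of the checkpoint name; "cvt-w24"
# yields "w2", which falls through to the wide configuration.
def _depth_key_demo() -> None:
    assert "cvt-13".rsplit("/", 1)[-1][4:6] == "13"
    assert "cvt-w24".rsplit("/", 1)[-1][4:6] == "w2"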
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
| 65
|
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])

        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3

            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 65
| 1
|
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    output_path = Path(output_path)

    # VAE DECODER
    vae_decoder = AutoencoderKL.from_pretrained(model_path + "/vae")
    vae_latent_channels = vae_decoder.config.latent_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_decoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, 25, 25).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del vae_decoder
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
print(args.output_path)
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
print('SD: Done: ONNX')
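# --- usage sketch (not part of the original script) ---
# The converter can also be called programmatically; "./sd-checkpoint" is a hypothetical
# local directory laid out like a diffusers checkpoint (with a vae/ subfolder):
#
#   convert_models("./sd-checkpoint", "./sd-onnx", opset=14, fp16=False)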
| 61
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 164
| 0
|
import json
import logging
import os
import re
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import datasets
import numpy as np
import torch
import torchaudio
from packaging import version
from torch import nn
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2Processor,
is_apex_available,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse('''1.6'''):
    _is_native_amp_available = True
from torch.cuda.amp import autocast
logger = logging.getLogger(__name__)
def list_field(default=None, metadata=None):
    """Helper for declaring list-valued dataclass fields."""
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."} )
    attention_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for the attention probabilities."} )
    activation_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout ratio for activations inside the fully connected layer."} )
    hidden_dropout: Optional[float] = field(
        default=0.1, metadata={
            "help": "The dropout probability for all fully connected layers in the embeddings, encoder, and pooler."
        }, )
    feat_proj_dropout: Optional[float] = field(
        default=0.1, metadata={"help": "The dropout probability for all 1D convolutional layers in the feature extractor."}, )
    mask_time_prob: Optional[float] = field(
        default=0.05, metadata={
            "help": (
                "Probability of each feature vector along the time axis to be chosen as the start of the vector "
                "span to be masked. Approximately ``mask_time_prob * sequence_length // mask_time_length`` feature "
                "vectors will be masked along the time axis. This is only relevant if ``apply_spec_augment is True``."
            )
        }, )
    layerdrop: Optional[float] = field(default=0.0, metadata={"help": "The LayerDrop probability."} )
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
    train_split_name: Optional[str] = field(
        default="train+validation", metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."} )
    preprocessing_num_workers: Optional[int] = field(
        default=None, metadata={"help": "The number of processes to use for the preprocessing."}, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        }, )
    max_val_samples: Optional[int] = field(
        default=None, metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of validation examples to this "
                "value if set."
            )
        }, )
    chars_to_ignore: List[str] = list_field(
        default=[",", "?", ".", "!", "-", ";", ":", '""', "%", "'", '"', "�"], metadata={"help": "A list of characters to remove from the transcripts."}, )
@dataclass
class DataCollatorCTCWithPadding:
    """Data collator that dynamically pads the inputs and labels it receives."""

    processor: Wav2Vec2Processor
    padding: Union[bool, str] = True
    max_length: Optional[int] = None
    max_length_labels: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    pad_to_multiple_of_labels: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # split inputs and labels since they have to be of different lengths and need
        # different padding methods
        input_features = [{"input_values": feature["input_values"]} for feature in features]
        label_features = [{"input_ids": feature["labels"]} for feature in features]
        batch = self.processor.pad(
            input_features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", )
        labels_batch = self.processor.pad(
            labels=label_features, padding=self.padding, max_length=self.max_length_labels, pad_to_multiple_of=self.pad_to_multiple_of_labels, return_tensors="pt", )
        # replace padding with -100 to ignore loss correctly
        labels = labels_batch["input_ids"].masked_fill(labels_batch.attention_mask.ne(1), -100)
        batch["labels"] = labels
        return batch
class CTCTrainer(Trainer):
    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)
        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)
        if self.args.n_gpu > 1:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["labels"] >= 0).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")
        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps
        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()
        return loss.detach()
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets:
    train_dataset = datasets.load_dataset(
        "common_voice", data_args.dataset_config_name, split=data_args.train_split_name )
    eval_dataset = datasets.load_dataset("common_voice", data_args.dataset_config_name, split="test")
# Create and save tokenizer
    chars_to_ignore_regex = f"[{''.join(data_args.chars_to_ignore)}]"

    def remove_special_characters(batch):
        batch["text"] = re.sub(chars_to_ignore_regex, "", batch["sentence"]).lower() + " "
        return batch

    train_dataset = train_dataset.map(remove_special_characters, remove_columns=["sentence"])
    eval_dataset = eval_dataset.map(remove_special_characters, remove_columns=["sentence"])
    def extract_all_chars(batch):
        all_text = " ".join(batch["text"])
        vocab = list(set(all_text))
        return {"vocab": [vocab], "all_text": [all_text]}

    vocab_train = train_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=train_dataset.column_names, )
    vocab_test = eval_dataset.map(
        extract_all_chars, batched=True, batch_size=-1, keep_in_memory=True, remove_columns=eval_dataset.column_names, )
    vocab_list = list(set(vocab_train["vocab"][0]) | set(vocab_test["vocab"][0]))
    vocab_dict = {v: k for k, v in enumerate(vocab_list)}
    vocab_dict["|"] = vocab_dict[" "]
    del vocab_dict[" "]
    vocab_dict["[UNK]"] = len(vocab_dict)
    vocab_dict["[PAD]"] = len(vocab_dict)
    with open("vocab.json", "w") as vocab_file:
        json.dump(vocab_dict, vocab_file)
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    tokenizer = Wav2Vec2CTCTokenizer(
        "vocab.json", unk_token="[UNK]", pad_token="[PAD]", word_delimiter_token="|", )
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1, sampling_rate=16_000, padding_value=0.0, do_normalize=True, return_attention_mask=True )
    processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
    model = Wav2Vec2ForCTC.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, activation_dropout=model_args.activation_dropout, attention_dropout=model_args.attention_dropout, hidden_dropout=model_args.hidden_dropout, feat_proj_dropout=model_args.feat_proj_dropout, mask_time_prob=model_args.mask_time_prob, gradient_checkpointing=training_args.gradient_checkpointing, layerdrop=model_args.layerdrop, ctc_loss_reduction="mean", pad_token_id=processor.tokenizer.pad_token_id, vocab_size=len(processor.tokenizer), )
    if data_args.max_train_samples is not None:
        max_train_samples = min(len(train_dataset), data_args.max_train_samples)
        train_dataset = train_dataset.select(range(max_train_samples))
    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    resampler = torchaudio.transforms.Resample(48_000, 16_000)
# Preprocessing the datasets.
    # We need to read the audio files as arrays and tokenize the targets.
    def speech_file_to_array_fn(batch):
        speech_array, sampling_rate = torchaudio.load(batch["path"])
        batch["speech"] = resampler(speech_array).squeeze().numpy()
        batch["sampling_rate"] = 16_000
        batch["target_text"] = batch["text"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn, remove_columns=train_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn, remove_columns=eval_dataset.column_names, num_proc=data_args.preprocessing_num_workers, )
    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        processed_batch = processor(
            audio=batch["speech"], text=batch["target_text"], sampling_rate=batch["sampling_rate"][0] )
        batch.update(processed_batch)
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset, remove_columns=train_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
    eval_dataset = eval_dataset.map(
        prepare_dataset, remove_columns=eval_dataset.column_names, batch_size=training_args.per_device_train_batch_size, batched=True, num_proc=data_args.preprocessing_num_workers, )
# Metric
    wer_metric = datasets.load_metric("wer")

    def compute_metrics(pred):
        pred_logits = pred.predictions
        pred_ids = np.argmax(pred_logits, axis=-1)
        pred.label_ids[pred.label_ids == -100] = processor.tokenizer.pad_token_id
        pred_str = processor.batch_decode(pred_ids)
        # we do not want to group tokens when computing the metrics
        label_str = processor.batch_decode(pred.label_ids, group_tokens=False)
        wer = wer_metric.compute(predictions=pred_str, references=label_str)
        return {"wer": wer}
if model_args.freeze_feature_extractor:
model.freeze_feature_extractor()
# Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor, padding=True)
# Initialize our Trainer
    trainer = CTCTrainer(
        model=model, data_collator=data_collator, args=training_args, compute_metrics=compute_metrics, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=processor.feature_extractor, )
# Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        # Save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
    return results
if __name__ == "__main__":
main()
| 370
|
import math
def fx(x: float, a: float) -> float:
    """The function whose root we search for: f(x) = x^2 - a."""
    return math.pow(x, 2) - a


def fx_derivative(x: float) -> float:
    """Derivative of f(x) = x^2 - a: f'(x) = 2x."""
    return 2 * x


def get_initial_point(a: float) -> float:
    """Pick a starting point that overshoots sqrt(a) by repeated squaring."""
    start = 2.0
    while start <= a:
        start = math.pow(start, 2)
    return start
def square_root_iterative(a: float, max_iter: int = 9999, tolerance: float = 0.00000000000001) -> float:
    """Approximate sqrt(a) with Newton's method on f(x) = x^2 - a."""
    if a < 0:
        raise ValueError("math domain error")
    value = get_initial_point(a)
    for _ in range(max_iter):
        prev_value = value
        value = value - fx(value, a) / fx_derivative(value)
        if abs(prev_value - value) < tolerance:
            return value
    return value
if __name__ == "__main__":
from doctest import testmod
testmod()
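# Usage sketch (added for illustration, not in the original module). Newton's method
# converges quadratically from the overshooting start point chosen by get_initial_point:
#
#   >>> round(square_root_iterative(16.0), 10)
#   4.0
#   >>> round(square_root_iterative(2.0), 6)
#   1.414214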
| 281
| 0
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor

logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.", FutureWarning, )
        super().__init__(*args, **kwargs)
| 181
|
from ...processing_utils import ProcessorMixin
class TvltProcessor(ProcessorMixin):
    attributes = ["image_processor", "feature_extractor"]
    image_processor_class = "TvltImageProcessor"
    feature_extractor_class = "TvltFeatureExtractor"

    def __init__(self, image_processor, feature_extractor):
        super().__init__(image_processor=image_processor, feature_extractor=feature_extractor)
        self.image_processor = image_processor
        self.feature_extractor = feature_extractor

    def __call__(self, images=None, audio=None, images_mixed=None, sampling_rate=None, mask_audio=False, mask_pixel=False, *args, **kwargs, ):
        if images is None and audio is None:
            raise ValueError("You need to specify either an `images` or `audio` input to process.")
        images_mixed_dict = None
        if images is not None:
            images_dict = self.image_processor(images, mask_pixel=mask_pixel, *args, **kwargs)
        if images_mixed is not None:
            images_mixed_dict = self.image_processor(images_mixed, is_mixed=True, *args, **kwargs)
        if audio is not None:
            audio_dict = self.feature_extractor(
                audio, *args, sampling_rate=sampling_rate, mask_audio=mask_audio, **kwargs )
        output_dict = {}
        if audio is not None:
            output_dict.update(audio_dict)
        if images is not None:
            output_dict.update(images_dict)
        if images_mixed_dict is not None:
            output_dict.update(images_mixed_dict)
        return output_dict

    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        feature_extractor_input_names = self.feature_extractor.model_input_names
        return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names))
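# Usage sketch (illustrative, not part of the original module); `video_frames` and
# `waveform` stand in for real inputs, and both sub-processors ship with transformers:
#
#   from transformers import TvltImageProcessor, TvltFeatureExtractor
#   processor = TvltProcessor(TvltImageProcessor(), TvltFeatureExtractor())
#   outputs = processor(images=video_frames, audio=waveform, sampling_rate=44100)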
| 169
| 0
|
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
logger = logging.get_logger(__name__)
if is_vision_available():
import PIL
class ChineseCLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(self, do_resize=True, size=None, resample=PILImageResampling.BICUBIC, do_center_crop=True, crop_size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=None, image_std=None, do_convert_rgb=True, **kwargs, ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)
    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
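# Usage sketch (illustrative, not part of the original module): preprocessing a random
# RGB array as a stand-in for a real photo with the processor reconstructed above:
#
#   import numpy as np
#   image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)
#   batch = ChineseCLIPImageProcessor()(images=image, return_tensors="np")
#   # batch["pixel_values"].shape == (1, 3, 224, 224)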
| 264
|
'''simple docstring'''
from PIL import Image
def change_contrast(img: Image, level: int) -> Image:
    """Change the contrast of a PIL image by the given level."""
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        return int(128 + factor * (c - 128))

    return img.point(contrast)
if __name__ == "__main__":
# Load image
with Image.open("image_data/lena.jpg") as img:
# Change contrast to 170
        cont_img = change_contrast(img, 170)
cont_img.save("image_data/lena_high_contrast.png", format="png")
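# Usage sketch with a synthetic image (illustrative, so the example does not depend on
# image_data/lena.jpg being present on disk):
#
#   gray = Image.new("L", (64, 64), color=100)
#   print(change_contrast(gray, 170).getpixel((0, 0)))  # darker than the original 100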
| 264
| 1
|
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class
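# Usage sketch (illustrative; assumes network access to the public
# "OFA-Sys/chinese-clip-vit-base-patch16" checkpoint, and `image` is a placeholder):
#
#   processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#   inputs = processor(text=["一张猫的照片"], images=image, return_tensors="pt")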
| 91
|
'''simple docstring'''
import argparse
import copy
def generate_neighbours(path):
    """Build, for each node, the list of its neighbours and distances from the input file."""
    dict_of_neighbours = {}
    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]] )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]] )
    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    """Greedy nearest-neighbour tour starting from the first node in the file."""
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node
    first_solution = []
    visiting = start_node
    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]
        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node
    first_solution.append(end_node)
    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1
    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    """Generate the 2-swap neighbourhood of a solution; each entry ends with its total distance."""
    neighborhood_of_solution = []
    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue
            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n
            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)
            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)
    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1
    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    """Tabu search over the 2-swap neighbourhood for `iters` iterations with a tabu list of length `size`."""
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution
    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1
        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1
            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]
        if len(tabu_list) >= size:
            tabu_list.pop(0)
        count = count + 1
    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)
    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours )
    best_sol, best_cost = tabu_search(
        first_solution, distance_of_first_solution, dict_of_neighbours, args.Iterations, args.Size, )
    print(f"Best solution: {best_sol}, with total distance: {best_cost}.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
"-f",
"--File",
type=str,
help="Path to the file containing the data",
required=True,
)
parser.add_argument(
"-i",
"--Iterations",
type=int,
help="How many iterations the algorithm should perform",
required=True,
)
parser.add_argument(
"-s", "--Size", type=int, help="Size of the tabu list", required=True
)
# Pass the arguments to main method
main(parser.parse_args())
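# Input format sketch (inferred from generate_neighbours; the file layout is not
# documented in the script itself): each line is "node_a node_b distance", so a file
# containing
#
#   a b 20
#   a c 18
#   b c 10
#
# would be passed as: python tabu_search.py -f graph.txt -i 4 -s 3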
| 93
| 0
|
"""simple docstring"""
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Scrape the main COVID-19 counters from worldometers.info."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}
if __name__ == "__main__":
print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
print(f'''{key}\n{value}\n''')
| 2
|
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
logger = logging.get_logger(__name__)
BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }

    def __init__(self, vocab_size=250880, hidden_size=64, n_layer=2, n_head=8, layer_norm_epsilon=1e-5, initializer_range=0.02, use_cache=True, bos_token_id=1, eos_token_id=2, apply_residual_connection_post_layernorm=False, hidden_dropout=0.0, attention_dropout=0.0, pretraining_tp=1, slow_but_exact=False, **kwargs, ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(self, config: PretrainedConfig, task: str = "default", patching_specs: List[PatchingSpec] = None, use_past: bool = False, ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3

    def generate_dummy_inputs(self, tokenizer: "PreTrainedTokenizer", batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional["TensorType"] = None, ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework )
        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
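# Usage sketch (illustrative values): the attribute_map lets the generic names resolve
# to the BLOOM-specific ones.
#
#   config = BloomConfig(vocab_size=1000, hidden_size=64, n_layer=2, n_head=8)
#   assert config.num_hidden_layers == 2  # resolved to n_layer via attribute_map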
| 2
| 1
|
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/mbart-large-en-ro': (
'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
),
'facebook/mbart-large-cc25': (
'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/mbart-large-en-ro": 1024,
    "facebook/mbart-large-cc25": 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class MBartTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", tokenizer_file=None, src_lang=None, tgt_lang=None, sp_model_kwargs: Optional[Dict[str, Any]] = None, additional_special_tokens=None, **kwargs, ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens] )
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    @property
    def vocab_size(self):
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of sub-word tokens to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix; suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang: str) -> None:
        """Reset the special tokens to the target language setting. No prefix; suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
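# Usage sketch (illustrative; assumes the public "facebook/mbart-large-en-ro" checkpoint
# and an installed sentencepiece):
#
#   tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-en-ro", src_lang="en_XX")
#   batch = tokenizer("Hello world", return_tensors="pt")  # suffixed with </s> and en_XX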
| 65
|
import math
import random
def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Return the value found after the forward propagation training."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta
    return layer_1 * 100
if __name__ == "__main__":
import doctest
doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
print(forward_propagation(expected, number_propagations))
| 65
| 1
|
def kth_permutation(k: int, n: int) -> list:
    """Return the k-th (0-indexed) lexicographic permutation of range(n) via the factorial number system."""
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"
    permutation = []
    elements = list(range(n))
    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])
    permutation.append(elements[0])
    return permutation
if __name__ == "__main__":
import doctest
doctest.testmod()
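# Usage sketch (illustrative): cross-checking one value against itertools, which also
# enumerates permutations in lexicographic order.
#
#   from itertools import permutations
#   assert tuple(kth_permutation(10, 4)) == list(permutations(range(4)))[10]  # (1, 3, 0, 2)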
| 356
|
import os
# Precomputes a list of the 100 first triangular numbers
TRIANGULAR_NUMBERS = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def solution() -> int:
    """Count the words in words.txt whose letter-value sum is a triangular number."""
    script_dir = os.path.dirname(os.path.realpath(__file__))
    words_file_path = os.path.join(script_dir, "words.txt")
    words = ""
    with open(words_file_path) as f:
        words = f.readline()
    words = [word.strip('"') for word in words.strip("\r\n").split(",")]
    words = [
        word
        for word in [sum(ord(x) - 64 for x in word) for word in words]
        if word in TRIANGULAR_NUMBERS
    ]
    return len(words)
if __name__ == "__main__":
print(solution())
| 33
| 0
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
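# Usage sketch (illustrative values): a smaller-than-default configuration.
#
#   config = BertGenerationConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4)
#   assert config.model_type == "bert-generation"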
| 97
|
def match_pattern(input_string: str, pattern: str) -> bool:
    """Bottom-up dynamic programming solution for matching `input_string` against `pattern`."""
    len_string = len(input_string) + 1
    len_pattern = len(pattern) + 1
    # dp is a 2d matrix where dp[i][j] denotes whether prefix string of
    # length i of input_string matches with prefix string of length j of
    # given pattern.
    # "dp" stands for dynamic programming.
    dp = [[0 for i in range(len_pattern)] for j in range(len_string)]
    # since string of zero length match pattern of zero length
    dp[0][0] = 1
    # since pattern of zero length will never match with string of non-zero length
    for i in range(1, len_string):
        dp[i][0] = 0
    # since string of zero length will match with pattern where there
    # is at least one * alternatively
    for j in range(1, len_pattern):
        dp[0][j] = dp[0][j - 2] if pattern[j - 1] == "*" else 0
    # now using bottom-up approach to find for all remaining lengths
    for i in range(1, len_string):
        for j in range(1, len_pattern):
            if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
                dp[i][j] = dp[i - 1][j - 1]
            elif pattern[j - 1] == "*":
                if dp[i][j - 2] == 1:
                    dp[i][j] = 1
                elif pattern[j - 2] in (input_string[i - 1], "."):
                    dp[i][j] = dp[i - 1][j]
                else:
                    dp[i][j] = 0
            else:
                dp[i][j] = 0
    return bool(dp[-1][-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
snake_case : Optional[Any] = "aab"
snake_case : List[str] = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F"{input_string} matches the given pattern {pattern}")
else:
print(F"{input_string} does not match with the given pattern {pattern}")
| 281
| 0
|
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor


class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()

        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")

        image = Image.open(ds[0]["file"]).convert("RGB")

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
_lowerCAmelCase = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
_lowerCAmelCase = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 
6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)

        encoding = image_processing(image, return_tensors="pt")

        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
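# Added usage sketch (not from the original file; the image path is a
# placeholder): OCR runs by default, so words and normalized boxes come back
# alongside the pixel values.
processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
sketch_encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
print(sketch_encoding.pixel_values.shape)  # torch.Size([1, 3, 224, 224])
print(len(sketch_encoding.words[0]), len(sketch_encoding.boxes[0]))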
| 356
|
'''simple docstring'''
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
execute_subprocess_async,
require_cpu,
require_huggingface_suite,
require_multi_gpu,
require_single_gpu,
)
from accelerate.utils import patch_environment
@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
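# Added sketch (not from the original file): `debug_launcher` runs a no-argument
# function under a lightweight multi-process CPU launcher, which is what the
# CPU tests above rely on.
def _toy_fn():
    print("running inside a spawned process")


debug_launcher(_toy_fn, num_processes=2)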
| 220
| 0
|
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith("<PIL.Image.Image image mode=RGB size=100x100 at")


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = "digital_image_processing/image_data/lena.jpg"

    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 264
|
"""simple docstring"""
from functools import lru_cache
@lru_cache
def factorial(num: int) -> int:
    """Return num!, computed recursively and memoized by lru_cache."""
    if num < 0:
        raise ValueError("Number should not be negative.")
    return 1 if num in (0, 1) else num * factorial(num - 1)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
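# Added usage sketch (not from the original file): lru_cache memoizes each
# argument, so repeated calls hit the cache.
assert factorial(5) == 120
assert factorial(0) == 1
print(factorial.cache_info())  # hits/misses counters exposed by lru_cache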
| 264
| 1
|
'''simple docstring'''
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_checkpoint(checkpoint_path):
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequenceParallelTransformerBlock` stores the QKV weight in K,V,Q order despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
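# Added example invocation (not from the original file; filename and paths are
# placeholders):
#   python convert_opt_checkpoint.py \
#       --fairseq_path /path/to/restored.pt \
#       --pytorch_dump_folder_path ./opt-hf \
#       --hf_config facebook/opt-350m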
| 160
|
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

GPTJ_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-j-6B": "https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json",
    # See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}


class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
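# Added usage sketch (not from the original file): attribute_map lets generic
# code read GPT-J's n_embd/n_layer fields under the standard names.
config = GPTJConfig()
print(config.hidden_size == config.n_embd)  # True, via attribute_map
print(config.num_hidden_layers, config.num_attention_heads)  # 28 16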
| 160
| 1
|
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def world_covid19_stats(url: str = "https://www.worldometers.info/coronavirus") -> dict:
    """Return a dict of current worldwide COVID-19 statistics scraped from worldometers."""
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    keys = soup.findAll("h1")
    values = soup.findAll("div", {"class": "maincounter-number"})
    keys += soup.findAll("span", {"class": "panel-title"})
    values += soup.findAll("div", {"class": "number-table-main"})
    return {key.text.strip(): value.text.strip() for key, value in zip(keys, values)}


if __name__ == "__main__":
    print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n")
    for key, value in world_covid19_stats().items():
        print(f"{key}\n{value}\n")
| 2
|
'''simple docstring'''
from __future__ import annotations
def all_unique(collection) -> bool:
    """Return True if every element in the collection is distinct."""
    return len(set(collection)) == len(collection)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 2
| 1
|
def kth_permutation(k, n):
    """Find the k-th lexicographic permutation (in increasing order) of
    0, 1, ..., n - 1 in O(n^2) time."""
    # Factorials from 1! to (n - 1)!
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    assert 0 <= k < factorials[-1] * n, "k out of bounds"

    permutation = []
    elements = list(range(n))

    # Find permutation
    while factorials:
        factorial = factorials.pop()
        number, k = divmod(k, factorial)
        permutation.append(elements[number])
        elements.remove(elements[number])

    permutation.append(elements[0])
    return permutation


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 134
|
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_snake_case : int = "scheduler_config.json"
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : Tuple = 1
__UpperCAmelCase : Tuple = 2
__UpperCAmelCase : Union[str, Any] = 3
__UpperCAmelCase : List[Any] = 4
__UpperCAmelCase : Tuple = 5
@dataclass
class a (_lowerCAmelCase ):
"""simple docstring"""
__UpperCAmelCase : jnp.ndarray
class a :
"""simple docstring"""
__UpperCAmelCase : Dict = SCHEDULER_CONFIG_NAME
__UpperCAmelCase : Union[str, Any] = ["dtype"]
__UpperCAmelCase : Tuple = []
__UpperCAmelCase : int = True
@classmethod
def __snake_case ( cls : List[str] , lowerCamelCase : Dict[str, Any] = None , lowerCamelCase : Optional[str] = None , lowerCamelCase : List[str]=False , **lowerCamelCase : Union[str, Any] , ) -> List[str]:
__snake_case , __snake_case : List[str] = cls.load_config(
pretrained_model_name_or_path=lowerCamelCase , subfolder=lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase , )
__snake_case , __snake_case : Dict = cls.from_config(lowerCamelCase , return_unused_kwargs=lowerCamelCase , **lowerCamelCase )
if hasattr(lowerCamelCase , "create_state" ) and getattr(lowerCamelCase , "has_state" , lowerCamelCase ):
__snake_case : Tuple = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def __snake_case ( self : Any , lowerCamelCase : Union[str, os.PathLike] , lowerCamelCase : bool = False , **lowerCamelCase : List[Any] ) -> int:
self.save_config(save_directory=lowerCamelCase , push_to_hub=lowerCamelCase , **lowerCamelCase )
@property
def __snake_case ( self : Tuple ) -> List[Any]:
return self._get_compatibles()
@classmethod
def __snake_case ( cls : int ) -> Dict:
__snake_case : Tuple = list(set([cls.__name__] + cls._compatibles ) )
__snake_case : int = importlib.import_module(__name__.split("." )[0] )
__snake_case : Tuple = [
getattr(lowerCamelCase , lowerCamelCase ) for c in compatible_classes_str if hasattr(lowerCamelCase , lowerCamelCase )
]
return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
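# Added numeric sketch (not from the original file): build a 10-step cosine
# ("squaredcos_cap_v2") schedule and check its basic shape and range.
sketch_betas = betas_for_alpha_bar(10)
print(sketch_betas.shape)  # (10,)
print(bool((sketch_betas > 0).all() and (sketch_betas <= 0.999).all()))  # True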
| 134
| 1
|
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_van": ["VAN_PRETRAINED_CONFIG_ARCHIVE_MAP", "VanConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_van"] = [
        "VAN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "VanForImageClassification",
        "VanModel",
        "VanPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
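# Added note (not from the original file): with the lazy module installed in
# sys.modules, importing the package is cheap; the heavy torch-backed symbols
# are only imported when first accessed, e.g. (module path assumed):
#   from transformers.models.deprecated.van import VanModel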
| 94
|
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected height and width when passing images to the
        processor, assuming do_resize is True with a scalar shortest_edge size."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
def A ( self : Tuple ) -> int:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowercase_ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=A )
for image in image_inputs:
self.assertIsInstance(A , Image.Image )
# Test not batched input
lowercase_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ , lowercase_ : Dict = self.image_processor_tester.get_expected_values(A , batched=A )
lowercase_ : str = image_processing(A , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : str ) -> Any:
# Initialize image_processing
lowercase_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowercase_ : List[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A )
for image in image_inputs:
self.assertIsInstance(A , np.ndarray )
# Test not batched input
lowercase_ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : int = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Optional[int] = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[Any] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[int]:
# Initialize image_processing
lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test not batched input
lowercase_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : Union[str, Any] = self.image_processor_tester.get_expected_values(A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowercase_ : Any = image_processing(A , return_tensors='''pt''' ).pixel_values
lowercase_ , lowercase_ : List[str] = self.image_processor_tester.get_expected_values(A , batched=A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A ( self : Tuple ) -> Optional[Any]:
# Initialize image_processings
lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
lowercase_ : Tuple = self.image_processing_class(do_resize=A , do_normalize=A , do_rescale=A )
# create random PyTorch tensors
lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A )
for image in image_inputs:
self.assertIsInstance(A , torch.Tensor )
# Test whether the method "pad" and calling the image processor return the same tensors
lowercase_ : Union[str, Any] = image_processing_a.pad(A , return_tensors='''pt''' )
lowercase_ : List[Any] = image_processing_a(A , return_tensors='''pt''' )
self.assertTrue(
torch.allclose(encoded_images_with_method['''pixel_values'''] , encoded_images['''pixel_values'''] , atol=1e-4 ) )
@slow
def A ( self : str ) -> List[Any]:
# prepare image and target
lowercase_ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
lowercase_ : List[Any] = json.loads(f.read() )
lowercase_ : Tuple = {'''image_id''': 3_97_69, '''annotations''': target}
# encode them
lowercase_ : Union[str, Any] = YolosImageProcessor.from_pretrained('''hustvl/yolos-small''' )
lowercase_ : List[Any] = image_processing(images=A , annotations=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : Tuple = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : Any = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[Any] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Optional[Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify orig_size
lowercase_ : List[str] = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : Optional[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
@slow
def A ( self : List[Any] ) -> Dict:
# prepare image, target and masks_path
lowercase_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
lowercase_ : str = json.loads(f.read() )
lowercase_ : int = {'''file_name''': '''000000039769.png''', '''image_id''': 3_97_69, '''segments_info''': target}
lowercase_ : List[Any] = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
# encode them
lowercase_ : int = YolosImageProcessor(format='''coco_panoptic''' )
lowercase_ : Any = image_processing(images=A , annotations=A , masks_path=A , return_tensors='''pt''' )
# verify pixel values
lowercase_ : Optional[Any] = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['''pixel_values'''].shape , A )
lowercase_ : Tuple = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , A , atol=1e-4 ) )
# verify area
lowercase_ : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , A ) )
# verify boxes
lowercase_ : str = torch.Size([6, 4] )
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , A )
lowercase_ : List[str] = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , A , atol=1e-3 ) )
# verify image_id
lowercase_ : List[str] = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , A ) )
# verify is_crowd
lowercase_ : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , A ) )
# verify class_labels
lowercase_ : Any = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , A ) )
# verify masks
lowercase_ : Dict = 82_28_73
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , A )
# verify orig_size
lowercase_ : Tuple = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , A ) )
# verify size
lowercase_ : List[str] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , A ) )
| 33
| 0
|
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class AccelerateLauncherTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate launch` CLI operates correctly.
    If a `default_config.yaml` file is located in the cache it will temporarily
    be moved aside for the duration of the tests.
    """

    mod_file = inspect.getfile(accelerate.test_utils)
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_cli.py"])

    base_cmd = ["accelerate", "launch"]
    config_folder = Path.home() / ".cache/huggingface/accelerate"
    config_file = "default_config.yaml"
    config_path = config_folder / config_file
    changed_path = config_folder / "_default_config.yaml"

    test_config_path = Path("tests/test_configs")
    @classmethod
    def setUpClass(cls):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path)

    @classmethod
    def tearDownClass(cls):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path)
    def test_cli(self):
        cmd = self.base_cmd
        if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
            cmd += ["--multi_gpu"]
        execute_subprocess_async(cmd + [self.test_file_path], env=os.environ.copy())

    def test_config_compatibility(self):
        for config in sorted(self.test_config_path.glob("**/*.yaml")):
            with self.subTest(config_file=config):
                execute_subprocess_async(
                    self.base_cmd + ["--config_file", str(config), self.test_file_path], env=os.environ.copy()
                )

    def test_accelerate_test(self):
        execute_subprocess_async(["accelerate", "test"], env=os.environ.copy())
class TpuConfigTester(unittest.TestCase):
    """
    Test case for verifying the `accelerate tpu-config` CLI passes the right command to `gcloud`.
    """

    tpu_name = "test-tpu"
    tpu_zone = "us-central1-a"
    command = "ls"
    cmd = ["accelerate", "tpu-config"]
    base_output = "cd /usr/share"
    command_file = "tests/test_samples/test_command_file.sh"
    gcloud = "Running gcloud compute tpus tpu-vm ssh"
    def test_base(self):
        output = run_command(
            self.cmd
            + ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"],
            return_stdout=True,
        )
        self.assertIn(
            f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all",
            output,
        )
def SCREAMING_SNAKE_CASE_ (self : Dict) ->str:
'''simple docstring'''
lowerCamelCase__: int =run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=UpperCAmelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Tuple:
'''simple docstring'''
lowerCamelCase__: List[Any] =run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=UpperCAmelCase_)
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : str) ->Any:
'''simple docstring'''
lowerCamelCase__: Tuple =run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=UpperCAmelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : str) ->int:
'''simple docstring'''
lowerCamelCase__: Optional[int] =run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=UpperCAmelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: str =run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=UpperCAmelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : int) ->Tuple:
'''simple docstring'''
lowerCamelCase__: str =run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=UpperCAmelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : Dict) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: int =run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=UpperCAmelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Optional[int] =run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=UpperCAmelCase_ , )
self.assertIn(
F"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , UpperCAmelCase_ , )
| 273
|
def longest_common_subsequence(x: str, y: str):
    """Return the length of the longest common subsequence of x and y,
    together with one such subsequence recovered by backtracking."""
    assert x is not None
    assert y is not None

    m = len(x)
    n = len(y)

    # declaring the array for storing the dp values
    l = [[0] * (n + 1) for _ in range(m + 1)]  # noqa: E741

    for i in range(1, m + 1):
        for j in range(1, n + 1):
            match = 1 if x[i - 1] == y[j - 1] else 0
            l[i][j] = max(l[i - 1][j], l[i][j - 1], l[i - 1][j - 1] + match)

    seq = ""
    i, j = m, n
    while i > 0 and j > 0:
        match = 1 if x[i - 1] == y[j - 1] else 0

        if l[i][j] == l[i - 1][j - 1] + match:
            if match == 1:
                seq = x[i - 1] + seq
            i -= 1
            j -= 1
        elif l[i][j] == l[i - 1][j]:
            i -= 1
        else:
            j -= 1

    return l[m][n], seq


if __name__ == "__main__":
    a = "AGGTAB"
    b = "GXTXAYB"
    expected_ln = 4
    expected_subseq = "GTAB"

    ln, subseq = longest_common_subsequence(a, b)
    print("len =", ln, ", sub-sequence =", subseq)
    import doctest

    doctest.testmod()
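# Added check (not from the original file): "GTAB" of length 4 is the classic
# expected answer for these two strings.
length, subsequence = longest_common_subsequence("AGGTAB", "GXTXAYB")
assert (length, subsequence) == (4, "GTAB")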
| 273
| 1
|
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class AutoImageProcessorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / "preprocessor_config.json"
            config_tmpfile = Path(tmpdirname) / "config.json"
            json.dump(
                {"image_processor_type": "CLIPImageProcessor", "processor_class": "CLIPProcessor"},
                open(processor_tmpfile, "w"),
            )
            json.dump({"model_type": "clip"}, open(config_tmpfile, "w"))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)
def __lowerCamelCase ( self :Any ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Any = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
snake_case__ : List[str] = Path(_lowerCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,open(_lowerCamelCase ,'''w''' ) ,)
json.dump({'''model_type''': '''clip'''} ,open(_lowerCamelCase ,'''w''' ) )
snake_case__ : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
def __lowerCamelCase ( self :Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : List[Any] = CLIPConfig()
            # Create a dummy config file with image_processor_type
snake_case__ : Union[str, Any] = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
snake_case__ : Dict = Path(_lowerCamelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,open(_lowerCamelCase ,'''w''' ) ,)
json.dump({'''model_type''': '''clip'''} ,open(_lowerCamelCase ,'''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
snake_case__ : Optional[Any] = AutoImageProcessor.from_pretrained(_lowerCamelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
snake_case__ : Optional[Any] = CLIPImageProcessor(**_lowerCamelCase )
# save in new folder
model_config.save_pretrained(_lowerCamelCase )
config.save_pretrained(_lowerCamelCase )
snake_case__ : int = AutoImageProcessor.from_pretrained(_lowerCamelCase )
# make sure private variable is not incorrectly saved
snake_case__ : List[str] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
def __lowerCamelCase ( self :Tuple ):
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Any = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} ,open(_lowerCamelCase ,'''w''' ) ,)
snake_case__ : Tuple = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
def __lowerCamelCase ( self :Dict ):
with self.assertRaisesRegex(
_lowerCamelCase ,'''clip-base is not a local folder and is not a valid model identifier''' ):
snake_case__ : Union[str, Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def __lowerCamelCase ( self :List[str] ):
with self.assertRaisesRegex(
_lowerCamelCase ,r'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
snake_case__ : str = AutoImageProcessor.from_pretrained(_lowerCamelCase ,revision='''aaaaaa''' )
def __lowerCamelCase ( self :List[Any] ):
with self.assertRaisesRegex(
_lowerCamelCase ,'''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' ,):
snake_case__ : List[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def __lowerCamelCase ( self :int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowerCamelCase ):
snake_case__ : Union[str, Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowerCamelCase ):
snake_case__ : Union[str, Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' ,trust_remote_code=_lowerCamelCase )
snake_case__ : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' ,trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,'''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCamelCase )
snake_case__ : Any = AutoImageProcessor.from_pretrained(_lowerCamelCase ,trust_remote_code=_lowerCamelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ ,'''NewImageProcessor''' )
def __lowerCamelCase ( self :Any ):
try:
AutoConfig.register('''custom''' ,_lowerCamelCase )
AutoImageProcessor.register(_lowerCamelCase ,_lowerCamelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowerCamelCase ):
AutoImageProcessor.register(_lowerCamelCase ,_lowerCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Optional[int] = Path(_lowerCamelCase ) / '''preprocessor_config.json'''
snake_case__ : Union[str, Any] = Path(_lowerCamelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} ,open(_lowerCamelCase ,'''w''' ) ,)
json.dump({'''model_type''': '''clip'''} ,open(_lowerCamelCase ,'''w''' ) )
snake_case__ : int = CustomImageProcessor.from_pretrained(_lowerCamelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowerCamelCase )
snake_case__ : List[Any] = AutoImageProcessor.from_pretrained(_lowerCamelCase )
self.assertIsInstance(_lowerCamelCase ,_lowerCamelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def __lowerCamelCase ( self :List[Any] ):
        class a ( CLIPImageProcessor ):
            is_local = True
try:
AutoConfig.register('''custom''' ,_lowerCamelCase )
AutoImageProcessor.register(_lowerCamelCase ,_lowerCamelCase )
# If remote code is not set, the default is to use local
snake_case__ : Dict = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ ,'''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
snake_case__ : Optional[int] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' ,trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,'''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
snake_case__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' ,trust_remote_code=_lowerCamelCase )
self.assertEqual(image_processor.__class__.__name__ ,'''NewImageProcessor''' )
self.assertTrue(not hasattr(_lowerCamelCase ,'''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
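# Note (assumption: explanatory comment added for clarity): the try/finally blocks above
# restore CONFIG_MAPPING and IMAGE_PROCESSOR_MAPPING after each test, so a registered
# custom config/image processor cannot leak into other tests.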
| 230
|
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
_UpperCamelCase : Optional[Any] = TypeVar('T')
class a ( Generic[T] ):
def __init__( self , _lowerCamelCase = True ):
lowercase = {} # dictionary of lists
lowercase = directed
def UpperCamelCase_ ( self , _lowerCamelCase , _lowerCamelCase ):
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
self.adj_list[destination_vertex].append(_lowerCamelCase )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
lowercase = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_lowerCamelCase )
lowercase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
else:
lowercase = [destination_vertex]
lowercase = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_lowerCamelCase )
lowercase = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
lowercase = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
lowercase = [destination_vertex]
lowercase = []
return self
def __repr__( self ):
return pformat(self.adj_list )
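# Minimal usage sketch (assumption: illustrative only, not part of the original module):
if __name__ == "__main__":
    _graph = a()
    _graph.add_edge(1, 2).add_edge(2, 3).add_edge(1, 3)
    print(_graph)  # directed by default: {1: [2, 3], 2: [3], 3: []}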
| 220
| 0
|
"""simple docstring"""
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys( config , base_model=False ):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("""module.cls_token""", """vit.embeddings.cls_token"""),
("""module.patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""module.patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""module.pos_embed""", """vit.embeddings.position_embeddings"""),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""module.norm.weight""", """layernorm.weight"""),
("""module.norm.bias""", """layernorm.bias"""),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , base_model=False ):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = """"""
        else:
            prefix = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F"""module.blocks.{i}.attn.qkv.weight""" )
        in_proj_bias = state_dict.pop(F"""module.blocks.{i}.attn.qkv.bias""" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.weight"""] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.query.bias"""] = in_proj_bias[: config.hidden_size]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.weight"""] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.key.bias"""] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.weight"""] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F"""{prefix}encoder.layer.{i}.attention.attention.value.bias"""] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_( state_dict ):
    ignore_keys = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head( state_dict ):
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
"""module.fc.fc1.weight""",
"""module.fc.fc1.bias""",
"""module.fc.bn1.weight""",
"""module.fc.bn1.bias""",
"""module.fc.bn1.running_mean""",
"""module.fc.bn1.running_var""",
"""module.fc.bn1.num_batches_tracked""",
"""module.fc.fc2.weight""",
"""module.fc.fc2.bias""",
"""module.fc.bn2.weight""",
"""module.fc.bn2.bias""",
"""module.fc.bn2.running_mean""",
"""module.fc.bn2.running_var""",
"""module.fc.bn2.num_batches_tracked""",
"""module.fc.fc3.weight""",
"""module.fc.fc3.bias""",
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val


def convert_vit_msn_checkpoint( checkpoint_url , pytorch_dump_folder_path ):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = """datasets/huggingface/label-files"""
    filename = """imagenet-1k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , """r""" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="""cpu""" )["""target_encoder"""]

    image_processor = ViTImageProcessor(size=config.image_size )

    remove_projection_head(state_dict )
    # ViTMSNModel is a bare encoder, so the base-model key layout is used
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )

    model.load_state_dict(state_dict )
    model.eval()

    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="""pt""" )

    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A__ = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
A__ = torch.tensor([[14.28_89, -18.90_45, 11.72_81]] )
elif "l16" in checkpoint_url:
A__ = torch.tensor([[41.50_28, -22.86_81, 45.64_75]] )
elif "b4" in checkpoint_url:
A__ = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
A__ = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , UpperCAmelCase_ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(UpperCAmelCase_ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
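# Hedged invocation sketch (assumption: the script file name and dump path are illustrative):
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small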
| 69
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ : List[str] = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ : str = {
'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json',
'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json',
'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json',
'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json',
'funnel-transformer/intermediate': (
'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json'
),
'funnel-transformer/intermediate-base': (
'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json'
),
'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json',
'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json',
'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json',
'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json',
}
class a ( PretrainedConfig ):
    """simple docstring"""

    model_type = "funnel"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
    }

    def __init__( self , vocab_size=30522 , block_sizes=[4, 4, 4] , block_repeats=None , num_decoder_layers=2 , d_model=768 , n_head=12 , d_head=64 , d_inner=3072 , hidden_act="gelu_new" , hidden_dropout=0.1 , attention_dropout=0.1 , activation_dropout=0.0 , initializer_range=0.1 , initializer_std=None , layer_norm_eps=1e-9 , pooling_type="mean" , attention_type="relative_shift" , separate_cls=True , truncate_seq=True , pool_q_only=True , **kwargs ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.block_repeats = [1] * len(block_sizes ) if block_repeats is None else block_repeats
        assert len(block_sizes ) == len(
            self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.initializer_range = initializer_range
        self.initializer_std = initializer_std
        self.layer_norm_eps = layer_norm_eps
        assert pooling_type in [
            "mean",
            "max",
        ], f"""Got {pooling_type} for `pooling_type` but only 'mean' and 'max' are supported."""
        self.pooling_type = pooling_type
        assert attention_type in [
            "relative_shift",
            "factorized",
        ], f"""Got {attention_type} for `attention_type` but only 'relative_shift' and 'factorized' are supported."""
        self.attention_type = attention_type
        self.separate_cls = separate_cls
        self.truncate_seq = truncate_seq
        self.pool_q_only = pool_q_only

        super().__init__(**kwargs )
    @property
    def num_hidden_layers( self ):
        """simple docstring"""
        return sum(self.block_sizes )

    @num_hidden_layers.setter
    def num_hidden_layers( self , value ):
        """simple docstring"""
        raise NotImplementedError(
            """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" )

    @property
    def num_blocks( self ):
        """simple docstring"""
        return len(self.block_sizes )

    @num_blocks.setter
    def num_blocks( self , value ):
        """simple docstring"""
        raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
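# Minimal usage sketch (assumption: illustrative only, not part of the original module):
if __name__ == "__main__":
    _cfg = a(block_sizes=[2, 2])
    print(_cfg.num_hidden_layers, _cfg.num_blocks)  # 4 2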
| 69
| 1
|
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester( unittest.TestCase ):
    '''simple docstring'''

    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=18 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_center_crop=True , crop_size=None , do_flip_channel_order=True , ):
        size = size if size is not None else {'''shortest_edge''': 20}
        crop_size = crop_size if crop_size is not None else {'''height''': 18, '''width''': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    '''simple docstring'''

    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_processor_tester = MobileViTImageProcessingTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''do_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''do_flip_channel_order''' ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 20} )
        self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
def _lowerCamelCase ( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )

        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
        encoded_images = image_processing(image_inputs , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
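# Note (assumption: explanatory comment added for clarity): all three call paths
# (PIL, NumPy, torch inputs) are expected to produce center-cropped tensors of
# crop_size, which is why the three tests assert identical output shapes.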
| 160
|
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A = logging.get_logger()
def __A ( a_ :int , a_ :str , a_ :LevitConfig , a_ :Path , a_ :bool = True) -> Union[str, Any]:
print(F"""Converting {name}...""")
with torch.no_grad():
if hidden_sizes == 1_28:
if name[-1] == "S":
__a : Optional[int] = timm.create_model('''levit_128s''' , pretrained=a_)
else:
__a : List[Any] = timm.create_model('''levit_128''' , pretrained=a_)
if hidden_sizes == 1_92:
__a : List[Any] = timm.create_model('''levit_192''' , pretrained=a_)
if hidden_sizes == 2_56:
__a : Any = timm.create_model('''levit_256''' , pretrained=a_)
if hidden_sizes == 3_84:
__a : Optional[int] = timm.create_model('''levit_384''' , pretrained=a_)
from_model.eval()
__a : Dict = LevitForImageClassificationWithTeacher(a_).eval()
__a : Optional[int] = OrderedDict()
__a : Tuple = from_model.state_dict()
__a : Dict = list(from_model.state_dict().keys())
__a : str = list(our_model.state_dict().keys())
print(len(a_) , len(a_))
for i in range(len(a_)):
__a : int = weights[og_keys[i]]
our_model.load_state_dict(a_)
__a : Union[str, Any] = torch.randn((2, 3, 2_24, 2_24))
__a : Union[str, Any] = from_model(a_)
__a : Optional[int] = our_model(a_).logits
assert torch.allclose(a_ , a_), "The model logits don't match the original one."
__a : List[Any] = name
print(a_)
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name)
__a : Tuple = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name)
print(F"""Pushed {checkpoint_name}""")
def __A ( a_ :Path , a_ :str = None , a_ :bool = True) -> Optional[Any]:
__a : List[Any] = '''imagenet-1k-id2label.json'''
__a : Tuple = 10_00
__a : List[str] = (1, num_labels)
__a : Union[str, Any] = '''huggingface/label-files'''
__a : Dict = num_labels
__a : List[Any] = json.load(open(hf_hub_download(a_ , a_ , repo_type='''dataset''') , '''r'''))
__a : str = {int(a_): v for k, v in idalabel.items()}
__a : int = idalabel
__a : List[str] = {v: k for k, v in idalabel.items()}
__a : Optional[int] = partial(a_ , num_labels=a_ , idalabel=a_ , labelaid=a_)
__a : Optional[int] = {
'''levit-128S''': 1_28,
'''levit-128''': 1_28,
'''levit-192''': 1_92,
'''levit-256''': 2_56,
'''levit-384''': 3_84,
}
__a : int = {
'''levit-128S''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-128''': ImageNetPreTrainedConfig(
hidden_sizes=[1_28, 2_56, 3_84] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'''levit-192''': ImageNetPreTrainedConfig(
hidden_sizes=[1_92, 2_88, 3_84] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-256''': ImageNetPreTrainedConfig(
hidden_sizes=[2_56, 3_84, 5_12] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'''levit-384''': ImageNetPreTrainedConfig(
hidden_sizes=[3_84, 5_12, 7_68] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , a_ , names_to_config[model_name] , a_ , a_)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , a_ , a_ , a_ , a_)
return config, expected_shape
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help='''The name of the model you wish to convert, it must be one of the supported Levit* architecture,''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
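# Hedged invocation sketch (assumption: the script file name is illustrative):
#     python convert_levit_timm_to_pytorch.py --model_name levit-128S --push_to_hub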
| 160
| 1
|
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = SamImageProcessor()
__lowerCAmelCase = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self , **__a ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
__lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=__a )
__lowerCAmelCase = [torch.ones((1, 3, 5, 5) )]
__lowerCAmelCase = [[17_64, 26_46]]
__lowerCAmelCase = [[6_83, 10_24]]
__lowerCAmelCase = processor.post_process_masks(__a , __a , __a )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCAmelCase = processor.post_process_masks(
__a , torch.tensor(__a ) , torch.tensor(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
__lowerCAmelCase = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(__a ):
__lowerCAmelCase = processor.post_process_masks(__a , np.array(__a ) , np.array(__a ) )
@require_vision
@require_tf
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = SamImageProcessor()
__lowerCAmelCase = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self , **__a ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case ( self ):
__lowerCAmelCase = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = self.get_image_processor(do_normalize=__a , padding_value=1.0 )
__lowerCAmelCase = SamProcessor.from_pretrained(self.tmpdirname , do_normalize=__a , padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __a )
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="np" )
__lowerCAmelCase = processor(images=__a , return_tensors="np" )
input_feat_extract.pop("original_sizes" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("reshaped_input_sizes" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
@require_tf
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=__a )
__lowerCAmelCase = [tf.ones((1, 3, 5, 5) )]
__lowerCAmelCase = [[17_64, 26_46]]
__lowerCAmelCase = [[6_83, 10_24]]
__lowerCAmelCase = processor.post_process_masks(__a , __a , __a , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCAmelCase = processor.post_process_masks(
__a , tf.convert_to_tensor(__a ) , tf.convert_to_tensor(__a ) , return_tensors="tf" , )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
# should also work with np
__lowerCAmelCase = [np.ones((1, 3, 5, 5) )]
__lowerCAmelCase = processor.post_process_masks(
__a , np.array(__a ) , np.array(__a ) , return_tensors="tf" )
self.assertEqual(masks[0].shape , (1, 3, 17_64, 26_46) )
__lowerCAmelCase = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__lowerCAmelCase = processor.post_process_masks(
__a , np.array(__a ) , np.array(__a ) , return_tensors="tf" )
@require_vision
@require_torchvision
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def snake_case ( self ):
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = SamImageProcessor()
__lowerCAmelCase = SamProcessor(__a )
processor.save_pretrained(self.tmpdirname )
def snake_case ( self , **__a ):
return AutoProcessor.from_pretrained(self.tmpdirname , **__a ).image_processor
def snake_case ( self ):
shutil.rmtree(self.tmpdirname )
def snake_case ( self ):
__lowerCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase = [Image.fromarray(np.moveaxis(__a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=__a )
__lowerCAmelCase = np.random.randint(0 , 2 , size=(1, 3, 5, 5) ).astype(np.floataa )
__lowerCAmelCase = [tf.convert_to_tensor(__a )]
__lowerCAmelCase = [torch.tensor(__a )]
__lowerCAmelCase = [[17_64, 26_46]]
__lowerCAmelCase = [[6_83, 10_24]]
__lowerCAmelCase = processor.post_process_masks(
__a , __a , __a , return_tensors="tf" )
__lowerCAmelCase = processor.post_process_masks(
__a , __a , __a , return_tensors="pt" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def snake_case ( self ):
__lowerCAmelCase = self.get_image_processor()
__lowerCAmelCase = SamProcessor(image_processor=__a )
__lowerCAmelCase = self.prepare_image_inputs()
__lowerCAmelCase = image_processor(__a , return_tensors="pt" )["pixel_values"].numpy()
__lowerCAmelCase = processor(images=__a , return_tensors="pt" )["pixel_values"].numpy()
__lowerCAmelCase = image_processor(__a , return_tensors="tf" )["pixel_values"].numpy()
__lowerCAmelCase = processor(images=__a , return_tensors="tf" )["pixel_values"].numpy()
self.assertTrue(np.allclose(__a , __a ) )
self.assertTrue(np.allclose(__a , __a ) )
self.assertTrue(np.allclose(__a , __a ) )
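# Shape note (assumption: explanatory comment added for clarity): post_process_masks
# upsamples each low-resolution mask of shape (1, 3, 5, 5) back to the original image
# size, hence the expected (1, 3, 1764, 2646) outputs asserted throughout these tests.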
| 259
|
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class _UpperCamelCase ( metaclass=DummyObject ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] =["""transformers""", """torch""", """note_seq"""]
def __init__( self , *__a , **__a ):
requires_backends(self , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
@classmethod
def snake_case ( cls , *__a , **__a ):
requires_backends(cls , ["transformers", "torch", "note_seq"] )
| 259
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case : List[Any] = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
__snake_case : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 134
|
'''simple docstring'''
import os
from math import log10


def solution( data_file: str = "base_exp.txt" ) -> int:
    """simple docstring"""
    largest: float = 0
    result: int = 0
    for i, line in enumerate(open(os.path.join(os.path.dirname(__file__ ), data_file ) ) ):
        a, x = list(map(int, line.split(""",""" ) ) )
        if x * log10(a ) > largest:
            largest = x * log10(a )
            result = i + 1
    return result
if __name__ == "__main__":
print(solution())
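# Why logarithms (assumption: explanatory note added for clarity, not in the original file):
# each line of base_exp.txt holds a pair "a,x", and since log10 is monotonic, comparing
# x * log10(a) ranks the values a**x without materialising the huge integers themselves.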
| 134
| 1
|
"""simple docstring"""
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def try_infer_format_from_ext( path ) -> str:
    if not path:
        return "pipe"
    for ext in PipelineDataFormat.SUPPORTED_FORMATS:
        if path.endswith(ext ):
            return ext
    raise Exception(
        f"""Unable to determine file format from file extension {path}. """
        f"""Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}""" )


def run_command_factory( args ):
    nlp = pipeline(
        task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
    format = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
    reader = PipelineDataFormat.from_str(
        format=format , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
    return RunCommand(nlp , reader )


class RunCommand(BaseTransformersCLICommand):
    def __init__( self , nlp: Pipeline , reader: PipelineDataFormat ) -> None:
        '''simple docstring'''
        self._nlp = nlp
        self._reader = reader

    @staticmethod
    def register_subcommand( parser ) -> None:
        '''simple docstring'''
        run_parser = parser.add_parser("run" , help="Run a pipeline through the CLI" )
        run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run" )
        run_parser.add_argument("--input" , type=str , help="Path to the file to use for inference" )
        run_parser.add_argument("--output" , type=str , help="Path to the file that will be used post to write results." )
        run_parser.add_argument("--model" , type=str , help="Name or path to the model to instantiate." )
        run_parser.add_argument("--config" , type=str , help="Name or path to the model's config to instantiate." )
        run_parser.add_argument(
            "--tokenizer" , type=str , help="Name of the tokenizer to use. (default: same as the model name)" )
        run_parser.add_argument(
            "--column" , type=str , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
        run_parser.add_argument(
            "--format" , type=str , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
        run_parser.add_argument(
            "--device" , type=int , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
        run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file." )
        run_parser.set_defaults(func=run_command_factory )

    def run( self ) -> None:
        '''simple docstring'''
        nlp , outputs = self._nlp, []
        for entry in self._reader:
            output = nlp(**entry ) if self._reader.is_multi_columns else nlp(entry )
            if isinstance(output , dict ):
                outputs.append(output )
            else:
                outputs += output

        # Saving data
        if self._nlp.binary_output:
            binary_path = self._reader.save_binary(outputs )
            logger.warning(F"""Current pipeline requires output to be in binary format, saving at {binary_path}""" )
        else:
            self._reader.save(outputs )
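# Hedged usage sketch (assumption: the file names are illustrative; "csv" is one of
# PipelineDataFormat.SUPPORTED_FORMATS):
#     transformers-cli run --task text-classification --model distilbert-base-uncased \
#         --input data.csv --output predictions.csv --column sentence --format csv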
| 366
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a_ = {
"configuration_xlm_roberta": [
"XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
"XLMRobertaConfig",
"XLMRobertaOnnxConfig",
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["XLMRobertaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["XLMRobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMRobertaForCausalLM",
"XLMRobertaForMaskedLM",
"XLMRobertaForMultipleChoice",
"XLMRobertaForQuestionAnswering",
"XLMRobertaForSequenceClassification",
"XLMRobertaForTokenClassification",
"XLMRobertaModel",
"XLMRobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMRobertaForCausalLM",
"TFXLMRobertaForMaskedLM",
"TFXLMRobertaForMultipleChoice",
"TFXLMRobertaForQuestionAnswering",
"TFXLMRobertaForSequenceClassification",
"TFXLMRobertaForTokenClassification",
"TFXLMRobertaModel",
"TFXLMRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"FlaxXLMRobertaForMaskedLM",
"FlaxXLMRobertaForCausalLM",
"FlaxXLMRobertaForMultipleChoice",
"FlaxXLMRobertaForQuestionAnswering",
"FlaxXLMRobertaForSequenceClassification",
"FlaxXLMRobertaForTokenClassification",
"FlaxXLMRobertaModel",
"FlaxXLMRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 163
| 0
|
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path , config_file , pytorch_dump_path ) -> None:
    '''simple docstring'''
    config = T5Config.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    model = T5ForConditionalGeneration(config )

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model , config , tf_checkpoint_path )

    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
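# Hedged invocation sketch (assumption: the script file name and paths are illustrative):
#     python convert_t5_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./t5/model.ckpt \
#         --config_file ./t5/config.json \
#         --pytorch_dump_path ./t5-pytorch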
| 273
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_5_0, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class A_ (unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        if self.framework == "pytorch":
            subprocess.run(
                F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=True , )
        assert hasattr(self , '''env''' )

    def create_estimator( self , instance_count=1 ):
        '''simple docstring'''
        return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-single""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )

    def save_results_as_csv( self , job_name ):
        '''simple docstring'''
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )

    def test_glue( self ):
'''simple docstring'''
UpperCAmelCase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
UpperCAmelCase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
UpperCAmelCase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
UpperCAmelCase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 9_9_9_9_9_9 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , _A )
| 273
| 1
|
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class A ( FlaxModelTesterMixin , unittest.TestCase ):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input( self ):
        """simple docstring"""
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        prng_key = jax.random.PRNGKey(0 )
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes) )
        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common( self ):
        """simple docstring"""
        init_dict = {
            '''block_out_channels''': [32, 64],
            '''in_channels''': 3,
            '''out_channels''': 3,
            '''down_block_types''': ['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''],
            '''up_block_types''': ['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''],
            '''latent_channels''': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
| 362
|
_A = {
"joule": 1.0,
"kilojoule": 1_000,
"megajoule": 1_000_000,
"gigajoule": 1_000_000_000,
"wattsecond": 1.0,
"watthour": 3_600,
"kilowatthour": 3_600_000,
"newtonmeter": 1.0,
"calorie_nutr": 4_186.8,
"kilocalorie_nutr": 4_186_800.00,
"electronvolt": 1.602_176_634e-19,
"britishthermalunit_it": 1_055.05_585,
"footpound": 1.355_818,
}
def energy_conversion( from_type , to_type , value ):
    # NOTE: the parameter order here is reconstructed from the function body and the
    # error message below; treat it as an assumption.
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION )}"
        )
        raise ValueError(msg )
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
if __name__ == "__main__":
import doctest
doctest.testmod()
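# Hedged usage sketch (assumption: illustrative only; the parameter order was
# reconstructed from the function body):
#     energy_conversion("kilowatthour", "joule", 1.0)  # -> 3_600_000.0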
| 167
| 0
|
"""simple docstring"""
def decimal_isolate( number: float , digit_amount: int ) -> float:
    if digit_amount > 0:
        return round(number - int(number ) , digit_amount )
    return number - int(number )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
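# Expected demo output (approximate for the unrounded cases, since the remainder of a
# binary float is not exact): 0.53, 0.3, 0.34, 0.345, -0.789, 0, -0.1, -0.12, -0.123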
| 69
|
"""simple docstring"""
def heaps(arr: list) -> list:
    # Generate all permutations of arr using Heap's iterative algorithm.
    if len(arr) <= 1:
        return [tuple(arr)]
    res = []

    def generate(n: int, arr: list):
        # c encodes the loop-counter state of the recursive formulation
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[i], arr[0] = arr[0], arr[i]
                else:
                    arr[i], arr[c[i]] = arr[c[i]], arr[i]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
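# Sanity check (not in the original script): Heap's algorithm emits all n! orderings,
# each differing from the previous one by a single swap, e.g.
#
#     heaps([1, 2, 3])
#     # -> [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]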
| 69
| 1
|
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt''',
'''allenai/longformer-large-4096''': (
'''https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''allenai/longformer-base-4096''': 4_096,
'''allenai/longformer-large-4096''': 4_096,
'''allenai/longformer-large-4096-finetuned-triviaqa''': 4_096,
'''allenai/longformer-base-4096-extra.pos.embd.only''': 4_096,
'''allenai/longformer-large-4096-extra.pos.embd.only''': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
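# Quick sanity check of the two helpers above (illustrative, not original code):
# bytes_to_unicode shifts unprintable low bytes into the U+0100 range, which is why a
# space (byte 32) appears as "Ġ" in GPT-2-style BPE vocabularies, and get_pairs lists
# the adjacent symbol pairs a BPE merge step chooses from.
#
#     bytes_to_unicode()[32]       # -> 'Ġ'
#     get_pairs(("l", "o", "w"))   # -> {('l', 'o'), ('o', 'w')}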
class LongformerTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens
    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
| 103
|
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.weight""", F"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm1.bias""", F"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.weight""", F"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.norm2.bias""", F"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.qkv.weight""", F"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.weight""", F"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((F"""visual_encoder.blocks.{i}.attn.proj.bias""", F"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc1.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.weight""", F"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((F"""visual_encoder.blocks.{i}.mlp.fc2.bias""", F"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""")
        v_bias = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""")
        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"""visual_encoder.blocks.{i}.attn.qkv.bias"""] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()
    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-2.7b', eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained('facebook/opt-6.7b', eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained('google/flan-t5-xxl', dense_act_fn='gelu', bos_token_id=1).to_dict()
    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    tokenizer = (
        AutoTokenizer.from_pretrained('facebook/opt-2.7b')
        if 'opt' in model_name
        else AutoTokenizer.from_pretrained('google/flan-t5-xl')
    )
    eos_token_id = tokenizer('\n', add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)
    hf_model = Blip2ForConditionalGeneration(config).eval()
    model_name_to_original = {
'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'),
'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'),
'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'),
'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'),
'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'),
'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'),
'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'),
}
    name, model_type = model_name_to_original[model_name]
    # load original model
    print('Loading original model...')
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=model_type, is_eval=True, device=device
    )
original_model.eval()
print('Done!' )
# update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith('Qformer.bert'):
            key = key.replace('Qformer.bert', 'qformer')
        if "attention.self" in key:
            key = key.replace('self', 'attention')
        if "opt_proj" in key:
            key = key.replace('opt_proj', 'language_projection')
        if "t5_proj" in key:
            key = key.replace('t5_proj', 'language_projection')
        if key.startswith('opt'):
            key = key.replace('opt', 'language')
        if key.startswith('t5'):
            key = key.replace('t5', 'language')
        state_dict[key] = val
    # read in qv biases
    read_in_q_v_bias(state_dict, config)
    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]
    image = load_demo_image()
    original_pixel_values = vis_processors['eval'](image).unsqueeze(0).to(device)
    input_ids = tokenizer(['\n'], return_tensors='pt').input_ids.to(device)
    # create processor
    image_processor = BlipImageProcessor(
        size={'height': image_size, 'width': image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors='pt').pixel_values.to(device)
    # make sure processor creates exact same pixel values
    assert torch.allclose(original_pixel_values, pixel_values)
    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({'image': original_pixel_values, 'text_input': ['']}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']}
            ).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits
assert original_logits.shape == logits.shape
print('First values of original logits:' , original_logits[0, :3, :3] )
print('First values of HF logits:' , logits[0, :3, :3] )
    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device
        )
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1E-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device
        )
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1E-2)
print('Looks ok!' )
print('Generating a caption...' )
    prompt = ''
    input_ids = tokenizer(prompt, return_tensors='pt').input_ids.to(device)
    original_outputs = original_model.generate({'image': original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print('Original generation:', original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print('HF generation:', output_text)
    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)
if push_to_hub:
processor.push_to_hub(F"""nielsr/{model_name}""" )
hf_model.push_to_hub(F"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
'''blip2-opt-2.7b''',
'''blip2-opt-6.7b''',
'''blip2-opt-2.7b-coco''',
'''blip2-opt-6.7b-coco''',
'''blip2-flan-t5-xl''',
'''blip2-flan-t5-xl-coco''',
'''blip2-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''blip2-opt-2.7b''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
    args = parser.parse_args()
    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
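# Example invocation (illustrative; the script filename is an assumption):
#
#     python convert_blip_2_original_to_pytorch.py \
#         --model_name blip2-opt-2.7b \
#         --pytorch_dump_folder_path ./blip2-opt-2.7b \
#         --push_to_hub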
| 103
| 1
|
from __future__ import annotations
def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill it with a queen
    row = len(possible_board)
# If row is equal to the size of the board it means there are a queen in each row in
# the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append(['''. ''' * i + '''Q ''' + '''. ''' * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
    for col in range(n):
# We apply that we learned previously. First we check that in the current board
# (possible_board) there are not other same value because if there is it means
# that there are a collision in vertical. Then we apply the two formulas we
# learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
# If any or these are True it means there is a collision so we continue to the
# next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")
    print(len(boards), "solutions were found.")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
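# Sanity check (not in the original): the two diagonal formulas prune the search down
# to the standard solution counts, e.g. 2 boards for n=4 and 92 for n=8.
#
#     n_queens_solution(8)  # prints 92 boards, then "92 solutions were found."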
| 259
|
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]
    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1
            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol
def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print('''A''' + str(i), end=''' ''')
    else:
        print('''(''', end=''' ''')
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(''')''', end=''' ''')
def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)
    print('''No. of Operation required: ''' + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)
if __name__ == "__main__":
main()
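# Worked example (from memory of the classic CLRS instance, so treat the numbers as a
# hedged cross-check rather than verified output): for dims [30, 35, 15, 5, 10, 20, 25]
# the recurrence
#     matrix[a][b] = min over c of matrix[a][c] + matrix[c + 1][b] + p[a-1] * p[c] * p[b]
# yields matrix[1][6] = 15125 scalar multiplications, printed with the
# parenthesization ( ( A1 ( A2 A3 ) ) ( ( A4 A5 ) A6 ) ).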
| 259
| 1
|
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
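# Example of the normalization above (not in the original file): a point inside the
# set, such as the origin, never escapes, so `step` reaches max_step - 1 and the
# function returns exactly 1, which the coloring helpers below render as black.
#
#     get_distance(0, 0, 50)  # -> 1.0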
def get_black_and_white_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return (2_55, 2_55, 2_55)
def get_color_coded_rgb(distance: float) -> tuple:
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 2_55) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 8_00,
    image_height: int = 6_00,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    img = Image.new('''RGB''', (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
    img = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 301
|
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class Stack(list):
    '''simple docstring'''

    def __lt__(self, other):
        return self[-1] < other[-1]

    def __eq__(self, other):
        return self[-1] == other[-1]
def patience_sort(collection: list) -> list:
    stacks: list[Stack] = []
    # sort into stacks
    for element in collection:
        new_stacks = Stack([element])
        i = bisect_left(stacks, new_stacks)
        if i != len(stacks):
            stacks[i].append(element)
        else:
            stacks.append(new_stacks)
    # use a heap-based merge to merge stack efficiently
    collection[:] = merge(*(reversed(stack) for stack in stacks))
    return collection
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(patience_sort(unsorted))
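# Illustrative run (not part of the original script). The number of piles built in the
# first phase equals the length of a longest increasing subsequence of the input,
# which is why nearly-sorted data needs very few piles:
#
#     patience_sort([1, 9, 5, 21, 17, 6])  # -> [1, 5, 6, 9, 17, 21]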
| 301
| 1
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    """split_info""" , [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="""my_dataset""" )] )
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the
    # "dataset_name" field even if it's deprecated. This way old versions of `datasets` can still reload
    # dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"""train""": split_info} ) )
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 35
|
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 3_8_4
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
    if "small" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [9_6, 1_9_2, 3_8_4, 7_6_8]
    if "base" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [1_2_8, 2_5_6, 5_1_2, 1_0_2_4]
        auxiliary_in_channels = 5_1_2
    if "large" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [1_9_2, 3_8_4, 7_6_8, 1_5_3_6]
        auxiliary_in_channels = 7_6_8
    if "xlarge" in model_name:
        depths = [3, 3, 2_7, 3]
        hidden_sizes = [2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8]
        auxiliary_in_channels = 1_0_2_4
    # set label information
    num_labels = 1_5_0
    repo_id = """huggingface/label-files"""
    filename = """ade20k-id2label.json"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths, hidden_sizes=hidden_sizes, out_features=["""stage1""", """stage2""", """stage3""", """stage4"""])
    config = UperNetConfig(
        backbone_config=backbone_config,
        auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels,
        id2label=id2label,
        label2id=label2id,
    )
    return config
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.stages.{i}.{j}.gamma''', f'''backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.depthwise_conv.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.dwconv.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.norm.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.layernorm.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv1.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.weight''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight''') )
rename_keys.append((f'''backbone.stages.{i}.{j}.pointwise_conv2.bias''', f'''backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias''') )
if i > 0:
rename_keys.append((f'''backbone.downsample_layers.{i}.0.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.0.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.0.bias''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.weight''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.weight''') )
rename_keys.append((f'''backbone.downsample_layers.{i}.1.bias''', f'''backbone.encoder.stages.{i}.downsampling_layer.1.bias''') )
rename_keys.append((f'''backbone.norm{i}.weight''', f'''backbone.hidden_states_norms.stage{i+1}.weight''') )
rename_keys.append((f'''backbone.norm{i}.bias''', f'''backbone.hidden_states_norms.stage{i+1}.bias''') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="""cpu""")["""state_dict"""]
    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()
    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("""bn""", """batch_norm""")
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    model.load_state_dict(state_dict)
    # verify on image
    url = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
    image = Image.open(requests.get(url, stream=True).raw).convert("""RGB""")
    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="""pt""").pixel_values
    with torch.no_grad():
        outputs = model(pixel_values)
if model_name == "upernet-convnext-tiny":
UpperCAmelCase__ : Any = torch.tensor(
[[-8.81_10, -8.81_10, -8.65_21], [-8.81_10, -8.81_10, -8.65_21], [-8.77_46, -8.77_46, -8.61_30]] )
elif model_name == "upernet-convnext-small":
UpperCAmelCase__ : Dict = torch.tensor(
[[-8.82_36, -8.82_36, -8.67_71], [-8.82_36, -8.82_36, -8.67_71], [-8.76_38, -8.76_38, -8.62_40]] )
elif model_name == "upernet-convnext-base":
UpperCAmelCase__ : Optional[Any] = torch.tensor(
[[-8.85_58, -8.85_58, -8.69_05], [-8.85_58, -8.85_58, -8.69_05], [-8.76_69, -8.76_69, -8.60_21]] )
elif model_name == "upernet-convnext-large":
UpperCAmelCase__ : str = torch.tensor(
[[-8.66_60, -8.66_60, -8.62_10], [-8.66_60, -8.66_60, -8.62_10], [-8.63_10, -8.63_10, -8.59_64]] )
elif model_name == "upernet-convnext-xlarge":
UpperCAmelCase__ : List[str] = torch.tensor(
[[-8.49_80, -8.49_80, -8.39_77], [-8.49_80, -8.49_80, -8.39_77], [-8.43_79, -8.43_79, -8.34_12]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , UpperCamelCase__ , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
print(f'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(f'''Pushing model and processor for {model_name} to hub''' )
model.push_to_hub(f'''openmmlab/{model_name}''' )
processor.push_to_hub(f'''openmmlab/{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='upernet-convnext-tiny',
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ['tiny', 'small', 'base', 'large', 'xlarge']],
help='Name of the ConvNext UperNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 163
| 0
|
"""simple docstring"""
import gc
import unittest
from parameterized import parameterized
from diffusers import FlaxUNet2DConditionModel
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import load_hf_numpy, require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
@slow
@require_flax
class FlaxUNet2DConditionModelIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def get_file_format(self, seed, shape):
        """simple docstring"""
        return F"""gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"""

    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()

    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        image = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        revision = "bf16" if fp16 else None
        model, params = FlaxUNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", dtype=dtype, revision=revision)
        return model, params

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        """simple docstring"""
        dtype = jnp.bfloat16 if fp16 else jnp.float32
        hidden_states = jnp.array(load_hf_numpy(self.get_file_format(seed, shape)), dtype=dtype)
        return hidden_states
@parameterized.expand(
[
# fmt: off
[83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
[17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
[8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
[3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
# fmt: on
])
    def test_compvis_sd_v1_4_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        """simple docstring"""
        model, params = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, in the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
[17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
[8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
[3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
# fmt: on
])
    def test_stabilityai_sd_v2_flax_vs_torch_fp16(self, seed, timestep, expected_slice):
        """simple docstring"""
        model, params = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)
        sample = model.apply(
            {"params": params},
            latents,
            jnp.array(timestep, dtype=jnp.int32),
            encoder_hidden_states=encoder_hidden_states,
        ).sample
        assert sample.shape == latents.shape
        output_slice = jnp.asarray(jax.device_get((sample[-1, -2:, -2:, :2].flatten())), dtype=jnp.float32)
        expected_output_slice = jnp.array(expected_slice, dtype=jnp.float32)
        # Found torch (float16) and flax (bfloat16) outputs to be within this tolerance, on the same hardware
        assert jnp.allclose(output_slice, expected_output_slice, atol=1e-2)
| 58
|
"""simple docstring"""
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json',
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = 'esm'
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")
    def to_dict(self):
        """simple docstring"""
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    '''simple docstring'''

    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 1_2_8
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        """simple docstring"""
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        """simple docstring"""
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    '''simple docstring'''

    num_blocks: int = 4_8
    sequence_state_dim: int = 1_0_2_4
    pairwise_state_dim: int = 1_2_8
    sequence_head_width: int = 3_2
    pairwise_head_width: int = 3_2
    position_bins: int = 3_2
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 1_2_8
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        """simple docstring"""
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(F"""`max_recycles` should be positive, got {self.max_recycles}.""")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                F""" {self.sequence_state_dim} and {self.sequence_head_width}.""")
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} and {self.pairwise_head_width}.""")
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                F""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                F""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(F"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
        if self.dropout >= 0.4:
            raise ValueError(F"""`dropout` should not be greater than 0.4, got {self.dropout}.""")

    def to_dict(self):
        """simple docstring"""
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    '''simple docstring'''

    sequence_dim: int = 3_8_4
    pairwise_dim: int = 1_2_8
    ipa_dim: int = 1_6
    resnet_dim: int = 1_2_8
    num_heads_ipa: int = 1_2
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 1_0
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        """simple docstring"""
        return asdict(self)
def get_default_vocab_list():
    return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
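# Note (assumption, based on the public ESM-2 checkpoints): the tuple above has 33
# entries: 4 control tokens, the 20 standard amino acids, 5 non-standard residue
# codes (X, B, U, Z, O), ".", "-", "<null_1>" and "<mask>", matching ESM-2's
# vocabulary size of 33.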
| 58
| 1
|
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('''At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training''')
# TF training parameters
USE_XLA = False
USE_AMP = False
def train_command_factory(args: Namespace):
    """simple docstring"""
    return TrainCommand(args)
class TrainCommand(BaseTransformersCLICommand):
"""simple docstring"""
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser('train', help='CLI tool to train a model on a task.')
        train_parser.add_argument(
            '--train_data',
            type=str,
            required=True,
            help='path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.',
        )
        train_parser.add_argument(
            '--column_label', type=int, default=0, help='Column of the dataset csv file with example labels.')
        train_parser.add_argument(
            '--column_text', type=int, default=1, help='Column of the dataset csv file with example texts.')
        train_parser.add_argument(
            '--column_id', type=int, default=2, help='Column of the dataset csv file with example ids.')
        train_parser.add_argument(
            '--skip_first_row', action='store_true', help='Skip the first row of the csv file (headers).')
        train_parser.add_argument('--validation_data', type=str, default='', help='path to validation dataset.')
        train_parser.add_argument(
            '--validation_split',
            type=float,
            default=0.1,
            help='if validation dataset is not provided, fraction of train dataset to use as validation dataset.',
        )
        train_parser.add_argument('--output', type=str, default='./', help='path to saved the trained model.')
        train_parser.add_argument(
            '--task', type=str, default='text_classification', help='Task to train the model on.')
        train_parser.add_argument(
            '--model', type=str, default='bert-base-uncased', help='Model\'s name or path to stored model.')
        train_parser.add_argument('--train_batch_size', type=int, default=3_2, help='Batch size for training.')
        train_parser.add_argument('--valid_batch_size', type=int, default=6_4, help='Batch size for validation.')
        train_parser.add_argument('--learning_rate', type=float, default=3e-5, help='Learning rate.')
        train_parser.add_argument('--adam_epsilon', type=float, default=1e-08, help='Epsilon for Adam optimizer.')
        train_parser.set_defaults(func=train_command_factory)
    def __init__(self, args: Namespace):
        self.logger = logging.get_logger("transformers-cli/training")
        self.framework = "tf" if is_tf_available() else "torch"
        os.makedirs(args.output, exist_ok=True)
        self.output = args.output

        self.column_label = args.column_label
        self.column_text = args.column_text
        self.column_id = args.column_id

        self.logger.info(f"Loading {args.task} pipeline for {args.model}")
        if args.task == "text_classification":
            self.pipeline = TextClassificationPipeline.from_pretrained(args.model)
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError

        self.logger.info(f"Loading dataset from {args.train_data}")
        self.train_dataset = Processor.create_from_csv(
            args.train_data,
            column_label=args.column_label,
            column_text=args.column_text,
            column_id=args.column_id,
            skip_first_row=args.skip_first_row,
        )
        self.valid_dataset = None
        if args.validation_data:
            self.logger.info(f"Loading validation dataset from {args.validation_data}")
            self.valid_dataset = Processor.create_from_csv(
                args.validation_data,
                column_label=args.column_label,
                column_text=args.column_text,
                column_id=args.column_id,
                skip_first_row=args.skip_first_row,
            )
        self.validation_split = args.validation_split
        self.train_batch_size = args.train_batch_size
        self.valid_batch_size = args.valid_batch_size
        self.learning_rate = args.learning_rate
        self.adam_epsilon = args.adam_epsilon
    def run(self):
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def run_torch(self):
        raise NotImplementedError

    def run_tf(self):
        self.pipeline.fit(
            self.train_dataset,
            validation_data=self.valid_dataset,
            validation_split=self.validation_split,
            learning_rate=self.learning_rate,
            adam_epsilon=self.adam_epsilon,
            train_batch_size=self.train_batch_size,
            valid_batch_size=self.valid_batch_size,
        )

        # Save trained pipeline
        self.pipeline.save_pretrained(self.output)
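# Hedged usage sketch (added; not part of the original file): how this command plugs
# into a root parser, mirroring transformers-cli. The prog name and the sample argv
# in the trailing comment are illustrative assumptions.
def _demo_build_train_parser() -> ArgumentParser:
    parser = ArgumentParser("transformers-cli")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")
    TrainCommand.register_subcommand(commands_parser)
    return parser

# e.g. args = _demo_build_train_parser().parse_args(["train", "--train_data", "train.csv"])
#      args.func(args).run()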
| 53
|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

CONVNEXTV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/convnextv2-tiny-1k-224": "https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json",
}
class ConvNextV2Config(BackboneConfigMixin, PretrainedConfig):
    model_type = "convnextv2"

    def __init__(
        self,
        num_channels=3,
        patch_size=4,
        num_stages=4,
        hidden_sizes=None,
        depths=None,
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        drop_path_rate=0.0,
        image_size=224,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_stages = num_stages
        self.hidden_sizes = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes
        self.depths = [3, 3, 9, 3] if depths is None else depths
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.drop_path_rate = drop_path_rate
        self.image_size = image_size
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
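# Hedged usage sketch (added; not part of the original file): instantiating the config
# and inspecting the backbone outputs that `get_aligned_output_features_output_indices`
# resolves in __init__. The chosen stages are illustrative.
if __name__ == "__main__":
    config = ConvNextV2Config(out_features=["stage2", "stage4"])
    print(config.stage_names)   # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
    print(config.out_features)  # ['stage2', 'stage4']
    print(config.out_indices)   # indices aligned to stage_names, e.g. (2, 4)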
| 167
| 0
|
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
    def __init__(
        self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2,
        num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10, initializer_range=0.02, num_labels=3,
        mask_ratio=0.6, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.mask_ratio = mask_ratio
        self.scope = scope

        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (self.num_patches + 1)))
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ViTMAEConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            decoder_hidden_size=self.hidden_size,
            decoder_num_hidden_layers=self.num_hidden_layers,
            decoder_num_attention_heads=self.num_attention_heads,
            decoder_intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            mask_ratio=self.mask_ratio,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTMAEModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        model = TFViTMAEForPreTraining(config)
        result = model(pixel_values, training=False)
        # expected sequence length = num_patches
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

        # test greyscale images
        config.num_channels = 1
        model = TFViTMAEForPreTraining(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, training=False)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViTMAE does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def test_keyword_and_dict_args(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            outputs_dict = model(inputs, noise=noise)

            inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            outputs_keywords = model(**inputs_keywords, noise=noise)
            output_dict = outputs_dict[0].numpy()
            output_keywords = outputs_keywords[0].numpy()

            self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
    def test_numpy_arrays_inputs(self):
        # make the mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        def prepare_numpy_arrays(inputs_dict):
            inputs_np_dict = {}
            for k, v in inputs_dict.items():
                if tf.is_tensor(v):
                    inputs_np_dict[k] = v.numpy()
                else:
                    inputs_np_dict[k] = np.array(v)
            return inputs_np_dict

        for model_class in self.all_model_classes:
            model = model_class(config)
            inputs = self._prepare_for_class(inputs_dict, model_class)
            inputs_np_dict = prepare_numpy_arrays(inputs)

            output_for_dict_input = model(inputs_np_dict, noise=noise)
            output_for_kw_input = model(**inputs_np_dict, noise=noise)
            self.assert_outputs_same(output_for_dict_input, output_for_kw_input)
    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
        # make masks reproducible
        np.random.seed(2)

        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        tf_noise = tf.constant(noise)

        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        tf_inputs_dict["noise"] = tf_noise

        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
    def test_keras_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        tf_main_layer_classes = {
            module_member
            for model_class in self.all_model_classes
            for module in (import_module(model_class.__module__),)
            for module_member_name in dir(module)
            if module_member_name.endswith("MainLayer")
            # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
            and module_member_name[: -len("MainLayer")] == model_class.__name__[: -len("Model")]
            for module_member in (getattr(module, module_member_name),)
            if isinstance(module_member, type)
            and tf.keras.layers.Layer in module_member.__bases__
            and getattr(module_member, "_keras_serializable", False)
        }
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        noise = tf.convert_to_tensor(noise)
        inputs_dict.update({"noise": noise})

        for main_layer_class in tf_main_layer_classes:
            main_layer = main_layer_class(config)

            symbolic_inputs = {
                name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
            }
            model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
            outputs = model(inputs_dict)

            with tempfile.TemporaryDirectory() as tmpdirname:
                filepath = os.path.join(tmpdirname, "keras_model.h5")
                model.save(filepath)
                model = tf.keras.models.load_model(
                    filepath, custom_objects={main_layer_class.__name__: main_layer_class}
                )
                assert isinstance(model, tf.keras.Model)
                after_outputs = model(inputs_dict)
                self.assert_outputs_same(after_outputs, outputs)
    @slow
    def test_save_load(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)

            if model_class.__name__ == "TFViTMAEModel":
                out_2 = outputs.last_hidden_state.numpy()
                out_2[np.isnan(out_2)] = 0
            else:
                out_2 = outputs.logits.numpy()
                out_2[np.isnan(out_2)] = 0

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=False)
                model = model_class.from_pretrained(tmpdirname)
                after_outputs = model(model_input, noise=noise)

                if model_class.__name__ == "TFViTMAEModel":
                    out_1 = after_outputs["last_hidden_state"].numpy()
                    out_1[np.isnan(out_1)] = 0
                else:
                    out_1 = after_outputs["logits"].numpy()
                    out_1[np.isnan(out_1)] = 0

                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
    def test_save_load_config(self):
        # make mask reproducible
        np.random.seed(2)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        num_patches = int((config.image_size // config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))

        for model_class in self.all_model_classes:
            model = model_class(config)
            model_input = self._prepare_for_class(inputs_dict, model_class)
            outputs = model(model_input, noise=noise)
            model_config = model.get_config()
            # make sure that returned config is jsonifiable, which is required by keras
            json.dumps(model_config)
            new_model = model_class.from_config(model.get_config())
            # make sure it also accepts a normal config
            _ = model_class.from_config(model.config)
            _ = new_model(model_input)  # Build model
            new_model.set_weights(model.get_weights())
            after_outputs = new_model(model_input, noise=noise)

            self.assert_outputs_same(after_outputs, outputs)
    @unittest.skip(
        reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load"
        " to get deterministic results."
    )
    def test_determinism(self):
        pass

    @unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load")
    def test_model_outputs_equivalence(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        model = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None

    @slow
    def test_inference_for_pretraining(self):
        # make random mask reproducible across the PT and TF model
        np.random.seed(2)

        model = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))

        # forward pass
        outputs = model(**inputs, noise=noise)

        # verify the logits
        expected_shape = tf.convert_to_tensor([1, 196, 768])
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.convert_to_tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]
        )

        tf.debugging.assert_near(outputs.logits[0, :3, :3], expected_slice, atol=1e-4)
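# Hedged numeric check (added; not part of the test file): the masked sequence length
# used by TFViTMAEModelTester above. With image_size=30, patch_size=2, mask_ratio=0.6:
# num_patches = 15**2 = 225 and the encoder keeps ceil(0.4 * 226) = 91 tokens
# (the +1 is the [CLS] token).
if __name__ == "__main__":
    assert int(math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1))) == 91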
| 350
|
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    """Check a single runtime dependency against the range pinned in `deps`."""
    require_version(deps[pkg], hint)
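# Hedged usage sketch (added; not part of the original module). The requirement
# strings below are illustrative, not the project's actual pins.
if __name__ == "__main__":
    dep_version_check("numpy")  # checks installed numpy against the pinned range in deps
    require_version("tqdm>=4.27", "Try: pip install -U tqdm")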
| 198
| 0
|
def get_data(source_data: list[list[float]]) -> list[list[float]]:
    """Transpose rows of raw data into per-column lists of floats."""
    data_lists: list[list[float]] = []
    for data in source_data:
        for i, el in enumerate(data):
            if len(data_lists) < i + 1:
                data_lists.append([])
            data_lists[i].append(float(el))
    return data_lists


def calculate_each_score(data_lists: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Min-max normalize each column; weight 0 means lower-is-better (score flipped)."""
    score_lists: list[list[float]] = []
    for dlist, weight in zip(data_lists, weights):
        mind = min(dlist)
        maxd = max(dlist)
        score: list[float] = []
        # for weight 0 score is 1 - actual score
        if weight == 0:
            for item in dlist:
                try:
                    score.append(1 - ((item - mind) / (maxd - mind)))
                except ZeroDivisionError:
                    score.append(1)
        elif weight == 1:
            for item in dlist:
                try:
                    score.append((item - mind) / (maxd - mind))
                except ZeroDivisionError:
                    score.append(0)
        # weight not 0 or 1
        else:
            msg = f"Invalid weight of {weight:f} provided"
            raise ValueError(msg)

        score_lists.append(score)
    return score_lists


def generate_final_scores(score_lists: list[list[float]]) -> list[float]:
    """Sum the per-column scores for each row."""
    final_scores: list[float] = [0 for i in range(len(score_lists[0]))]
    for slist in score_lists:
        for j, ele in enumerate(slist):
            final_scores[j] = final_scores[j] + ele
    return final_scores


def procentual_proximity(source_data: list[list[float]], weights: list[int]) -> list[list[float]]:
    """Score each row by weighted min-max proximity and append the score to the row."""
    data_lists = get_data(source_data)
    score_lists = calculate_each_score(data_lists, weights)
    final_scores = generate_final_scores(score_lists)

    # append scores to source data
    for i, ele in enumerate(final_scores):
        source_data[i].append(ele)
    return source_data
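# Hedged usage sketch (added; values are illustrative): weight 1 rewards larger
# values, weight 0 rewards smaller ones; each row gains a combined final score.
if __name__ == "__main__":
    vehicles = [[20, 60, 2012], [23, 90, 2015], [22, 50, 2011]]
    # columns: price (lower is better), mileage (higher is better), year (higher is better)
    print(procentual_proximity(vehicles, [0, 1, 1]))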
| 103
|
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/transformers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file, using the regex/replacement registered for `pattern`."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version pinned by `check_min_version` in all example scripts."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace links to main-branch doc pages by stable links in the README model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/transformers/main/model_doc",
                "https://huggingface.co/docs/transformers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
    if not patch:
        print("Cleaning main README, don't forget to run `make fix-copies`.")
        clean_main_ref_in_model_list()


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    print("Cleaning main README, don't forget to run `make fix-copies`.")
    clean_main_ref_in_model_list()
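# Hedged demo (added; not part of the original script): how the "init" pattern
# rewrites a version string in memory, mirroring what update_version_in_file does
# on disk. The sample line and target version are illustrative.
def _demo_init_pattern():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    sample = '__version__ = "4.30.0.dev0"'
    return re_pattern.sub(replace.replace("VERSION", "4.31.0"), sample)  # '__version__ = "4.31.0"\n'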
if __name__ == "__main__":
A__ : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
A__ : Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work()
| 103
| 1
|
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3,
        is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="DPT does not use inputs_embeds" )
def snake_case_ ( self : Union[str, Any] ):
pass
def snake_case_ ( self : Tuple ):
_UpperCAmelCase , _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : str = model_class(A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A , nn.Linear ) )
def snake_case_ ( self : Optional[Any] ):
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : int = model_class(A )
_UpperCAmelCase : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str = [*signature.parameters.keys()]
_UpperCAmelCase : Any = ["pixel_values"]
self.assertListEqual(arg_names[:1] , A )
def snake_case_ ( self : List[str] ):
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*A )
def snake_case_ ( self : Dict ):
_UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        # We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
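# Hedged sketch (added; not part of the test file): upscaling the raw predicted depth
# to the input image size for visualization. The helper name is illustrative;
# `interpolate` usage is standard torch.
def _upscale_depth_for_visualization(predicted_depth, image):
    prediction = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=image.size[::-1],  # PIL gives (width, height); interpolate wants (height, width)
        mode="bicubic",
        align_corners=False,
    )
    return prediction.squeeze().cpu().numpy()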
| 202
|
"""simple docstring"""
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "transformers",
    os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
    submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
transformers = spec.loader.load_module()

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"\[(.+?)\]\((https://huggingface\.co/.+?)\)")


CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    "CLIPConfig",
    "DecisionTransformerConfig",
    "EncoderDecoderConfig",
    "RagConfig",
    "SpeechEncoderDecoderConfig",
    "VisionEncoderDecoderConfig",
    "VisionTextDualEncoderConfig",
}
def check_config_docstrings_have_checkpoints():
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values()):
        checkpoint_found = False

        # source code of `config_class`
        config_source = inspect.getsource(config_class)
        checkpoints = _re_checkpoint.findall(config_source)

        for checkpoint in checkpoints:
            # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
            # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
            ckpt_name, ckpt_link = checkpoint

            # verify the checkpoint name corresponds to the checkpoint link
            ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
            if ckpt_link == ckpt_link_from_name:
                checkpoint_found = True
                break

        name = config_class.__name__
        if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = "\n".join(sorted(configs_without_checkpoint))
        raise ValueError(f"The following configurations don't contain any valid checkpoint:\n{message}")
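# Hedged demo (added; not part of the original script): what `_re_checkpoint`
# extracts from a docstring line. The example checkpoint is illustrative.
def _demo_checkpoint_regex():
    example = "[bert-base-uncased](https://huggingface.co/bert-base-uncased)"
    return _re_checkpoint.findall(example)  # [('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')]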
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 202
| 1
|
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clipseg"] = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 113
|
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine similarity between every image embedding and every text embedding."""
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())
class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
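# Hedged demo (added; not part of the original module): cosine_distance on random
# embeddings. Each entry of the result is the cosine similarity between one image
# embedding and one text embedding; shapes are illustrative.
if __name__ == "__main__":
    img = torch.randn(2, 8)
    txt = torch.randn(5, 8)
    sims = cosine_distance(img, txt)
    print(sims.shape)  # torch.Size([2, 5]); values lie in [-1, 1]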
| 300
| 0
|
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertTokenizer
    rust_tokenizer_class = BertTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # With lower casing
        tokenizer = self.get_tokenizer(do_lower_case=True)
        rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True)

        sequence = "UNwant\u00E9d,running"

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_chinese(self):
        tokenizer = BasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_basic_tokenizer_splits_on_punctuation(self):
        tokenizer = BasicTokenizer()
        text = "a\n'll !!to?'d of, can't."
        expected = ["a", "'", "ll", "!", "!", "to", "?", "'", "d", "of", ",", "can", "'", "t", "."]
        self.assertListEqual(tokenizer.tokenize(text), expected)
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        self.assertListEqual(
            [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
        )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [101] + text + [102]
        assert encoded_pair == [101] + text + [102] + text_2 + [102]
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results], tokens['offset_mapping'] )
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
| 103
|
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class EsmFoldModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        config = EsmConfig(
            vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False}, )
        return config
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForProteinFolding(config=config).float()
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)

        self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3))
        self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (EsmForProteinFolding,) if is_torch_available() else ()
    all_generative_model_classes = ()
    pipeline_model_mapping = {} if is_torch_available() else {}
    test_sequence_classification_problem_types = False
    def setUp(self):
        self.model_tester = EsmFoldModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
@unittest.skip('Does not support attention outputs' )
def snake_case_ ( self ) -> Tuple:
pass
@unittest.skip
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('Esm does not support embedding resizing' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Dict:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Union[str, Any]:
pass
@unittest.skip('ESMFold does not support head pruning.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> str:
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip('ESMFold only has one output format.' )
def snake_case_ ( self ) -> int:
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold does not support input chunking.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def snake_case_ ( self ) -> Tuple:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> Any:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> str:
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def snake_case_ ( self ) -> List[Any]:
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def snake_case_ ( self ) -> List[str]:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def snake_case_ ( self ) -> Optional[Any]:
pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_protein_folding(self):
        model = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1').float()
        model.eval()
        input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        position_outputs = model(input_ids)['positions']
        # float32 to match the outputs of the .float() model above
        expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32)
        self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
| 103
| 1
|
'''simple docstring'''
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
    """
    Compute the speed of sound in a fluid from its density (kg/m^3) and
    bulk modulus (Pa), using c = sqrt(K / rho).

    >>> round(speed_of_sound_in_a_fluid(density=998, bulk_modulus=2.1e9), 2)
    1450.59
    """
    if density <= 0:
        raise ValueError('Impossible fluid density')
    if bulk_modulus <= 0:
        raise ValueError('Impossible bulk modulus')
    return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 58
|
'''simple docstring'''
from string import ascii_lowercase, ascii_uppercase
def capitalize(sentence: str) -> str:
    """
    Capitalizes the first letter of a sentence or word.

    >>> capitalize("hello world")
    'Hello world'
    >>> capitalize("123 hello world")
    '123 hello world'
    >>> capitalize("")
    ''
    """
    if not sentence:
        return ""
    # Map every lowercase letter to its uppercase counterpart.
    lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase))
    return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
| 58
| 1
|
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image
from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs
img = imread(r'digital_image_processing/image_data/lena_small.jpg')
gray = cvtColor(img, COLOR_BGR2GRAY)
def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open('digital_image_processing/image_data/lena_small.jpg') as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            '<PIL.Image.Image image mode=RGB size=100x100 at')


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread('digital_image_processing/image_data/lena_small.jpg', 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = 'digital_image_processing/image_data/lena_small.jpg'):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(file_path: str = 'digital_image_processing/image_data/lena_small.jpg'):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_path = 'digital_image_processing/image_data/lena.jpg'
    # Reading the image and converting it to grayscale.
    image = imread(file_path, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
| 247
|
from manim import *
class Stage1(Scene):  # class name reconstructed; the masked source only guarantees a manim Scene subclass
'''simple docstring'''
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ : int = Rectangle(height=0.5, width=0.5 )
UpperCamelCase__ : Optional[int] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0 )
UpperCamelCase__ : Dict = [mem.copy() for i in range(6 )]
UpperCamelCase__ : Any = [mem.copy() for i in range(6 )]
UpperCamelCase__ : int = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Tuple = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : int = VGroup(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Optional[int] = Text('''CPU''', font_size=24 )
UpperCamelCase__ : Any = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
cpu.move_to([-2.5, -0.5, 0] )
self.add(__magic_name__ )
UpperCamelCase__ : Any = [mem.copy() for i in range(1 )]
UpperCamelCase__ : Optional[int] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Union[str, Any] = Text('''GPU''', font_size=24 )
UpperCamelCase__ : List[Any] = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
gpu.align_to(__magic_name__, __magic_name__ )
gpu.set_x(gpu.get_x() - 1 )
self.add(__magic_name__ )
UpperCamelCase__ : str = [mem.copy() for i in range(6 )]
UpperCamelCase__ : Optional[int] = VGroup(*__magic_name__ ).arrange(__magic_name__, buff=0 )
UpperCamelCase__ : Optional[int] = Text('''Model''', font_size=24 )
UpperCamelCase__ : int = Group(__magic_name__, __magic_name__ ).arrange(__magic_name__, buff=0.5, aligned_edge=__magic_name__ )
model.move_to([3, -1.0, 0] )
self.play(
Create(__magic_name__, run_time=1 ), Create(__magic_name__, run_time=1 ), Create(__magic_name__, run_time=1 ), )
UpperCamelCase__ : Optional[int] = MarkupText(
f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24, )
UpperCamelCase__ : List[str] = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
UpperCamelCase__ : Union[str, Any] = MarkupText(
f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18, )
key_text.move_to([-5, 2.4, 0] )
step_a.move_to([2, 2, 0] )
self.play(Write(__magic_name__, run_time=2.5 ), Write(__magic_name__ ), Write(__magic_name__ ) )
self.add(__magic_name__ )
UpperCamelCase__ : Dict = []
UpperCamelCase__ : Any = []
UpperCamelCase__ : int = []
for i, rect in enumerate(__magic_name__ ):
UpperCamelCase__ : Union[str, Any] = Rectangle(height=0.46, width=0.46 ).set_stroke(width=0.0 ).set_fill(__magic_name__, opacity=0.7 )
cpu_target.move_to(__magic_name__ )
cpu_target.generate_target()
UpperCamelCase__ : Tuple = 0.46 / 4
UpperCamelCase__ : Optional[Any] = 0.46 / 3
if i == 0:
cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ), buff=0.02, direction=__magic_name__ )
cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 )
elif i == 3:
cpu_target.target.next_to(cpu_targs[0].target, direction=__magic_name__, buff=0.0 )
else:
cpu_target.target.next_to(cpu_targs[i - 1].target, direction=__magic_name__, buff=0.0 )
cpu_targs.append(__magic_name__ )
first_animations.append(rect.animate(run_time=0.5 ).set_stroke(__magic_name__ ) )
second_animations.append(MoveToTarget(__magic_name__, run_time=1.5 ) )
self.play(*__magic_name__ )
self.play(*__magic_name__ )
self.wait()
| 247
| 1
|
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = 'naver-clova-ix/donut-base-finetuned-docvqa'
    description = (
        'This is a tool that answers a question about an document (pdf). It takes an input named `document` which '
        'should be the document containing the information, as well as a `question` that is the question about the '
        'document. It returns a text that contains the answer to the question.'
    )
    name = 'document_qa'
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ['image', 'text']
    outputs = ['text']
    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.')

        super().__init__(*args, **kwargs)
    def encode(self, document: 'Image', question: str):
        task_prompt = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
        prompt = task_prompt.replace('{user_input}', question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors='pt').input_ids
        pixel_values = self.pre_processor(document, return_tensors='pt').pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
    def forward(self, inputs):
        return self.model.generate(
            inputs['pixel_values'].to(self.device), decoder_input_ids=inputs['decoder_input_ids'].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True, ).sequences
    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, '')
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, '')
        sequence = re.sub(r'<.*?>', '', sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
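# A hypothetical usage sketch (file name and variables illustrative, not part
# of this module); a PipelineTool instance is called directly and runs
# encode -> forward -> decode under the hood:
#
#   from PIL import Image
#   tool = DocumentQuestionAnsweringTool()
#   answer = tool(document=Image.open("invoice.png"), question="What is the total?")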
| 253
|
'''simple docstring'''
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_t5 import T5Tokenizer
else:
    T5Tokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/spiece.model""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/spiece.model""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/spiece.model""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/spiece.model""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/tokenizer.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/tokenizer.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/tokenizer.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/tokenizer.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/tokenizer.json""",
},
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""t5-small""": 5_12,
"""t5-base""": 5_12,
"""t5-large""": 5_12,
"""t5-3b""": 5_12,
"""t5-11b""": 5_12,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
    '''simple docstring'''

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = T5Tokenizer

    prefix_tokens: List[int] = []

    def __init__(self, vocab_file=None, tokenizer_file=None, eos_token="</s>", unk_token="<unk>", pad_token="<pad>", extra_ids=100, additional_special_tokens=None, **kwargs, ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens")

        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, extra_ids=extra_ids, additional_special_tokens=additional_special_tokens, **kwargs, )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
        self._extra_ids = extra_ids

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5TokenizerFast.max_model_input_sizes:
            deprecated_max_model_length = T5TokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.", FutureWarning, )

        return max_model_length

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer.")

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
            logger.info(f"Copy vocab file to {out_vocab_file}")

        return (out_vocab_file,)

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        token_ids_0 = token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0
        else:
            token_ids_1 = token_ids_1 + [self.eos_token_id]
            return self.prefix_tokens + token_ids_0 + token_ids_1

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)), self.additional_special_tokens)))

    def get_sentinel_token_ids(self):
        return [self.convert_tokens_to_ids(token) for token in self.get_sentinel_tokens()]
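# Illustrative note (not in the original file): with the default extra_ids=100,
# the sentinel tokens are "<extra_id_0>" ... "<extra_id_99>";
# get_sentinel_tokens() recovers them by regex-matching the additional special
# tokens, and get_sentinel_token_ids() maps them to vocabulary ids.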
| 198
| 0
|
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                })
        })
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 251
|
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list[int]:
    """
    Count, for every attainable total, the number of ways that ``dice_number``
    dice with ``sides_number`` sides can roll it.
    """
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    faces_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(faces_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """
    Returns the probability that Peter (nine four-sided dice) rolls a strictly
    higher total than Colin (six six-sided dice), rounded to seven decimal places.
    """
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)

    return rounded_peter_win_probability
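# Sanity notes (not from the original file): Project Euler 205 gives
# solution() == 0.5731441, and the brute-force enumeration above stays cheap
# because Peter has only 4**9 = 262144 possible rolls and Colin 6**6 = 46656.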
if __name__ == "__main__":
print(f"""{solution() = }""")
| 251
| 1
|
from __future__ import annotations
import unittest
from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel
@require_tf
class TFBlenderbotModelTester:
    """simple docstring"""

    config_cls = BlenderbotConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotModel(config=config).get_decoder()
        input_ids = inputs_dict['input_ids']

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict['attention_mask'][:1, :]
        head_mask = inputs_dict['head_mask']
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
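# Illustrative example (not in the original test): with pad_token_id=1, an
# input_ids row [[5, 7, 1, 1]] yields attention_mask [[1, 1, 0, 0]]; the
# decoder mask always keeps position 0 so the start token is attended to.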
@require_tf
class TFBlenderbotModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else ()
    all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotForConditionalGeneration,
            "feature-extraction": TFBlenderbotModel,
            "summarization": TFBlenderbotForConditionalGeneration,
            "text2text-generation": TFBlenderbotForConditionalGeneration,
            "translation": TFBlenderbotForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFBlenderbotModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbot400MIntegrationTests(unittest.TestCase):
    """simple docstring"""

    src_text = ["My friends are cool but they eat too many carbs."]
    model_name = "facebook/blenderbot-400M-distill"

    @cached_property
    def tokenizer(self):
        return BlenderbotTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    @slow
    def test_generation_from_long_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
        assert (
            generated_words
            == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?"
        )
| 118
|
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)

    return result


def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]], ) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
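# For n=4, k=2 the script prints the six 2-element combinations of {1..4},
# one per line: 1 2 / 1 3 / 1 4 / 2 3 / 2 4 / 3 4.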
| 201
| 0
|
class SubArray:
    def __init__(self, arr) -> None:
        # Split the comma-separated input string into a list of number strings.
        self.array = arr.split(",")

    def solve_sub_array(self) -> int:
        # Kadane's algorithm: sum_value[i] is the best sum ending at index i,
        # rear[i] is the best sum seen anywhere in array[: i + 1].
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
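# Worked example (illustrative): for the input "1,2,-3,4" the running values
# are sum_value = [1, 3, 0, 4] and rear = [1, 3, 3, 4], so the maximum
# sub-array sum returned is 4.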
| 369
|
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __lowerCamelCase :
"""simple docstring"""
def __init__( self , UpperCAmelCase , UpperCAmelCase=13 , UpperCAmelCase=32 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=16 , UpperCAmelCase=[32, 64, 128] , UpperCAmelCase=[1, 2, 1] , UpperCAmelCase=[2, 2, 4] , UpperCAmelCase=2 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=True , UpperCAmelCase=None , UpperCAmelCase=True , UpperCAmelCase=10 , UpperCAmelCase=8 , UpperCAmelCase=["stage1", "stage2"] , UpperCAmelCase=[1, 2] , ) -> Optional[int]:
'''simple docstring'''
lowercase_ = parent
lowercase_ = batch_size
lowercase_ = image_size
lowercase_ = patch_size
lowercase_ = num_channels
lowercase_ = embed_dim
lowercase_ = hidden_sizes
lowercase_ = depths
lowercase_ = num_heads
lowercase_ = window_size
lowercase_ = mlp_ratio
lowercase_ = qkv_bias
lowercase_ = hidden_dropout_prob
lowercase_ = attention_probs_dropout_prob
lowercase_ = drop_path_rate
lowercase_ = hidden_act
lowercase_ = use_absolute_embeddings
lowercase_ = patch_norm
lowercase_ = layer_norm_eps
lowercase_ = initializer_range
lowercase_ = is_training
lowercase_ = scope
lowercase_ = use_labels
lowercase_ = type_sequence_label_size
lowercase_ = encoder_stride
lowercase_ = out_features
lowercase_ = out_indices
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase_ = None
if self.use_labels:
lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase_ = FocalNetModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
lowercase_ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowercase_ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
lowercase_ = None
lowercase_ = FocalNetBackbone(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = FocalNetForMaskedImageModeling(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForMaskedImageModeling(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
lowercase_ = self.type_sequence_label_size
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = model(UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase_ = 1
lowercase_ = FocalNetForImageClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase_ = model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = self.prepare_config_and_inputs()
lowercase_ , lowercase_ , lowercase_ = config_and_inputs
lowercase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCamelCase ( snake_case_ , snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
lowercase_ = ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 , has_text_modality=UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
return
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase )
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCAmelCase )
def A__ ( self ) -> Dict:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase )
@unittest.skip(reason="FocalNet does not use inputs_embeds" )
def A__ ( self ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="FocalNet does not use feedforward chunking" )
def A__ ( self ) -> Tuple:
'''simple docstring'''
pass
def A__ ( self ) -> str:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
def A__ ( self ) -> Any:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
lowercase_ = model_class(UpperCAmelCase )
lowercase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase_ = [*signature.parameters.keys()]
lowercase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int:
'''simple docstring'''
lowercase_ = model_class(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
with torch.no_grad():
lowercase_ = model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
lowercase_ = outputs.hidden_states
lowercase_ = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
# FocalNet has a different seq_length
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowercase_ = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
lowercase_ , lowercase_ , lowercase_ , lowercase_ = reshaped_hidden_states[0].shape
lowercase_ = (
reshaped_hidden_states[0].view(UpperCAmelCase , UpperCAmelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
def A__ ( self ) -> Tuple:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = 3
lowercase_ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowercase_ = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowercase_ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowercase_ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowercase_ = True
self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
@slow
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ = FocalNetModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase_ , lowercase_ = self.model_tester.prepare_config_and_inputs_for_common()
lowercase_ = _config_zero_init(UpperCAmelCase )
for model_class in self.all_model_classes:
lowercase_ = model_class(config=UpperCAmelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@require_vision
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        # assertEqual, not assertTrue: the predicted ImageNet class should be 281 (tabby cat)
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
@require_torch
class __lowerCamelCase ( snake_case_ , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (FocalNetBackbone,) if is_torch_available() else ()
lowerCAmelCase__ = FocalNetConfig
lowerCAmelCase__ = False
def A__ ( self ) -> Optional[int]:
'''simple docstring'''
lowercase_ = FocalNetModelTester(self )
| 297
| 0
|
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
return 2.0 * image - 1.0
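# Illustrative: a 100x100 RGB PIL image becomes 96x96 (the nearest multiple of
# 32), is scaled to [0, 1], reshaped to a (1, 3, 96, 96) tensor, and finally
# mapped to the [-1, 1] range the VQ-VAE expects.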
class LDMSuperResolutionPipeline(DiffusionPipeline):
    def __init__(
        self, vqvae: VQModel, unet: UNet2DModel, scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ], ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
def __call__( self : Union[str, Any] , A_ : Union[torch.Tensor, PIL.Image.Image] = None , A_ : Optional[int] = 1 , A_ : Optional[int] = 1_0_0 , A_ : Optional[float] = 0.0 , A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , A_ : Optional[str] = "pil" , A_ : bool = True , ):
if isinstance(A_ , PIL.Image.Image):
lowerCAmelCase_ : Dict = 1
elif isinstance(A_ , torch.Tensor):
lowerCAmelCase_ : Dict = image.shape[0]
else:
raise ValueError(F"""`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(A_)}""")
if isinstance(A_ , PIL.Image.Image):
lowerCAmelCase_ : int = preprocess(A_)
lowerCAmelCase_ , lowerCAmelCase_ : List[str] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCAmelCase_ : List[Any] = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCAmelCase_ : Tuple = next(self.unet.parameters()).dtype
lowerCAmelCase_ : List[str] = randn_tensor(A_ , generator=A_ , device=self.device , dtype=A_)
lowerCAmelCase_ : int = image.to(device=self.device , dtype=A_)
# set timesteps and move to the correct device
self.scheduler.set_timesteps(A_ , device=self.device)
lowerCAmelCase_ : Optional[int] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase_ : Dict = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase_ : Optional[Any] = '''eta''' in set(inspect.signature(self.scheduler.step).parameters.keys())
lowerCAmelCase_ : Dict = {}
if accepts_eta:
lowerCAmelCase_ : Union[str, Any] = eta
for t in self.progress_bar(A_):
# concat latents and low resolution image in the channel dimension.
lowerCAmelCase_ : str = torch.cat([latents, image] , dim=1)
lowerCAmelCase_ : str = self.scheduler.scale_model_input(A_ , A_)
# predict the noise residual
lowerCAmelCase_ : Union[str, Any] = self.unet(A_ , A_).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase_ : Any = self.scheduler.step(A_ , A_ , A_ , **A_).prev_sample
# decode the image latents with the VQVAE
lowerCAmelCase_ : List[Any] = self.vqvae.decode(A_).sample
lowerCAmelCase_ : Tuple = torch.clamp(A_ , -1.0 , 1.0)
lowerCAmelCase_ : Union[str, Any] = image / 2 + 0.5
lowerCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
lowerCAmelCase_ : int = self.numpy_to_pil(A_)
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_)
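# Hedged usage sketch (added for illustration, not part of the original file).
# Assumption: the pipeline class above is exported under its upstream diffusers
# name `LDMSuperResolutionPipeline`, and the checkpoint id below is available
# on the Hub; swap in your own class name / checkpoint as needed.
from diffusers import LDMSuperResolutionPipeline
from PIL import Image

pipe = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
low_res = Image.open("low_res.png").convert("RGB")
# 100 denoising steps; eta only takes effect with the DDIM scheduler (see above)
upscaled = pipe(low_res, num_inference_steps=100, eta=1.0).images[0]
upscaled.save("upscaled.png")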
| 103
|
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class __snake_case :
def __init__( self : Tuple , A_ : Any , A_ : Tuple=1_3 , A_ : str=7 , A_ : Any=True , A_ : Union[str, Any]=True , A_ : int=False , A_ : int=True , A_ : List[Any]=9_9 , A_ : Dict=6_4 , A_ : int=5 , A_ : List[Any]=4 , A_ : Optional[Any]=6_4 , A_ : str="gelu" , A_ : Union[str, Any]=0.1 , A_ : List[Any]=0.1 , A_ : Any=5_1_2 , A_ : Union[str, Any]=1_6 , A_ : str=2 , A_ : Any=0.02 , A_ : str=3 , A_ : Optional[int]=4 , A_ : int=None , ):
lowerCAmelCase_ : List[Any] = parent
lowerCAmelCase_ : List[Any] = batch_size
lowerCAmelCase_ : List[Any] = seq_length
lowerCAmelCase_ : int = is_training
lowerCAmelCase_ : Union[str, Any] = use_input_mask
lowerCAmelCase_ : Tuple = use_token_type_ids
lowerCAmelCase_ : List[Any] = use_labels
lowerCAmelCase_ : int = vocab_size
lowerCAmelCase_ : Union[str, Any] = hidden_size
lowerCAmelCase_ : Optional[Any] = num_hidden_layers
lowerCAmelCase_ : List[str] = num_attention_heads
lowerCAmelCase_ : List[str] = intermediate_size
lowerCAmelCase_ : int = hidden_act
lowerCAmelCase_ : List[str] = hidden_dropout_prob
lowerCAmelCase_ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase_ : Optional[Any] = max_position_embeddings
lowerCAmelCase_ : Union[str, Any] = type_vocab_size
lowerCAmelCase_ : Any = type_sequence_label_size
lowerCAmelCase_ : Optional[int] = initializer_range
lowerCAmelCase_ : str = num_labels
lowerCAmelCase_ : List[str] = num_choices
lowerCAmelCase_ : Optional[Any] = scope
def UpperCAmelCase__ ( self : Dict):
return MPNetConfig.from_pretrained('''microsoft/mpnet-base''')
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
lowerCAmelCase_ : int = None
if self.use_input_mask:
lowerCAmelCase_ : List[str] = random_attention_mask([self.batch_size, self.seq_length])
lowerCAmelCase_ : Any = None
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
lowerCAmelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
lowerCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices)
lowerCAmelCase_ : List[str] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Dict , A_ : Dict , A_ : int , A_ : Tuple , A_ : List[str] , A_ : str , A_ : List[Any]):
lowerCAmelCase_ : int = MPNetModel(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Any = model(A_ , A_)
lowerCAmelCase_ : Union[str, Any] = model(A_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size))
def UpperCAmelCase__ ( self : List[str] , A_ : Union[str, Any] , A_ : List[Any] , A_ : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] , A_ : Any):
lowerCAmelCase_ : Any = MPNetForQuestionAnswering(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : int = model(
A_ , attention_mask=A_ , start_positions=A_ , end_positions=A_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Tuple , A_ : List[str] , A_ : Optional[Any] , A_ : Dict , A_ : Union[str, Any] , A_ : Tuple):
lowerCAmelCase_ : Tuple = self.num_labels
lowerCAmelCase_ : Any = MPNetForSequenceClassification(A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Dict = model(A_ , attention_mask=A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Tuple , A_ : Dict , A_ : Tuple , A_ : Dict , A_ : List[str] , A_ : List[Any]):
lowerCAmelCase_ : int = self.num_choices
lowerCAmelCase_ : List[str] = MPNetForMultipleChoice(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase_ : int = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
lowerCAmelCase_ : Optional[int] = model(
A_ , attention_mask=A_ , labels=A_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def UpperCAmelCase__ ( self : Optional[Any] , A_ : Any , A_ : int , A_ : Any , A_ : List[Any] , A_ : Any , A_ : Union[str, Any]):
lowerCAmelCase_ : int = self.num_labels
lowerCAmelCase_ : Tuple = MPNetForTokenClassification(config=A_)
model.to(A_)
model.eval()
lowerCAmelCase_ : Optional[int] = model(A_ , attention_mask=A_ , labels=A_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : List[Any] = self.prepare_config_and_inputs()
((lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_) , (lowerCAmelCase_)) : Union[str, Any] = config_and_inputs
lowerCAmelCase_ : List[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( UpperCamelCase_ ,UpperCamelCase_ ,unittest.TestCase ):
_a = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
_a = (
{
'''feature-extraction''': MPNetModel,
'''fill-mask''': MPNetForMaskedLM,
'''question-answering''': MPNetForQuestionAnswering,
'''text-classification''': MPNetForSequenceClassification,
'''token-classification''': MPNetForTokenClassification,
'''zero-shot''': MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
_a = False
_a = True
def UpperCAmelCase__ ( self : Union[str, Any]):
lowerCAmelCase_ : List[Any] = MPNetModelTester(self)
lowerCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=A_ , hidden_size=3_7)
def UpperCAmelCase__ ( self : Any):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[Any]):
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*A_)
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*A_)
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*A_)
def UpperCAmelCase__ ( self : int):
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*A_)
def UpperCAmelCase__ ( self : str):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*A_)
@require_torch
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Tuple):
lowerCAmelCase_ : Union[str, Any] = MPNetModel.from_pretrained('''microsoft/mpnet-base''')
lowerCAmelCase_ : Optional[Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]])
lowerCAmelCase_ : Union[str, Any] = model(A_)[0]
lowerCAmelCase_ : Optional[int] = torch.Size((1, 1_1, 7_6_8))
self.assertEqual(output.shape , A_)
lowerCAmelCase_ : Tuple = torch.tensor(
[[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]])
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , A_ , atol=1e-4))
| 103
| 1
|
"""simple docstring"""
import math
import os
import sys
def __A ( a_ :str) -> str:
__a : List[str] = ''''''
try:
with open(a_ , '''rb''') as binary_file:
__a : List[Any] = binary_file.read()
for dat in data:
__a : List[str] = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('''File not accessible''')
sys.exit()
def __A ( a_ :dict[str, str] , a_ :str , a_ :int , a_ :str) -> None:
lexicon.pop(a_)
__a : int = last_match_id
if math.loga(a_).is_integer():
for curr_key in lexicon:
__a : Optional[Any] = '''0''' + lexicon[curr_key]
__a : int = bin(a_)[2:]
def __A ( a_ :str) -> str:
__a : Optional[int] = {'''0''': '''0''', '''1''': '''1'''}
__a , __a : int = '''''', ''''''
__a : Any = len(a_)
for i in range(len(a_)):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__a : Union[str, Any] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(a_ , a_ , a_ , a_)
index += 1
__a : str = ''''''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__a : str = lexicon[curr_string]
result += last_match_id
return result
def __A ( a_ :str , a_ :str) -> str:
__a : Optional[Any] = os.path.getsize(a_)
__a : str = bin(a_)[2:]
__a : Optional[Any] = len(a_)
return "0" * (length_length - 1) + file_length_binary + compressed
def __A ( a_ :str , a_ :str) -> None:
__a : Tuple = 8
try:
with open(a_ , '''wb''') as opened_file:
__a : Dict = [
to_write[i : i + byte_length]
for i in range(0 , len(a_) , a_)
]
if len(result_byte_array[-1]) % byte_length == 0:
result_byte_array.append('''10000000''')
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1]) - 1
)
for elem in result_byte_array:
opened_file.write(int(a_ , 2).to_bytes(1 , byteorder='''big'''))
except OSError:
print('''File not accessible''')
sys.exit()
def __A ( a_ :str , a_ :str) -> None:
__a : Dict = read_file_binary(a_)
__a : List[str] = compress_data(a_)
__a : int = add_file_length(a_ , a_)
write_file_binary(a_ , a_)
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
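# Hedged usage note (added for illustration). Assuming the script above is
# saved as `lzw_compress.py`, it is invoked with a source and destination path:
#
#     python lzw_compress.py input.bin output.lzw
#
# The destination file then holds the unary-prefixed file-length header
# produced by add_file_length, followed by the LZW bit stream padded to whole
# bytes by write_file_binary.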
| 188
|
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def __A ( a_ :List[Any]=None , a_ :Tuple=None) -> List[Any]:
return field(default_factory=lambda: default , metadata=a_)
@dataclass
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = field(
metadata={'''help''': '''The csv file to plot.'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Whether to plot along batch size or sequence length. Defaults to sequence length.'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Whether the csv file has time results or memory results. Defaults to memory results.'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Disable logarithmic scale when plotting'''} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={
'''help''': '''Whether the csv file has training results or inference results. Defaults to inference results.'''
} , )
__lowerCAmelCase = field(
default=_UpperCamelCase , metadata={'''help''': '''Filename under which the plot will be saved. If unused no plot is saved.'''} , )
__lowerCAmelCase = list_field(
default=_UpperCamelCase , metadata={'''help''': '''List of model names that are used instead of the ones in the csv file.'''} )
def __A ( a_ :Optional[Any]) -> Any:
try:
int(a_)
return True
except ValueError:
return False
def __A ( a_ :List[Any]) -> Any:
try:
float(a_)
return True
except ValueError:
return False
class __lowercase :
'''simple docstring'''
def __init__( self , _UpperCAmelCase ):
__a : Dict = args
__a : Tuple = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline='''''' ) as csv_file:
__a : int = csv.DictReader(_UpperCAmelCase )
for row in reader:
__a : Union[str, Any] = row['''model''']
self.result_dict[model_name]["bsz"].append(int(row['''batch_size'''] ) )
self.result_dict[model_name]["seq_len"].append(int(row['''sequence_length'''] ) )
if can_convert_to_int(row['''result'''] ):
# value is not None
__a : Optional[int] = int(row['''result'''] )
elif can_convert_to_float(row['''result'''] ):
# value is not None
__a : Optional[Any] = float(row['''result'''] )
def _lowerCamelCase ( self ):
__a , __a : Optional[int] = plt.subplots()
__a : str = '''Time usage''' if self.args.is_time else '''Memory usage'''
__a : str = title_str + ''' for training''' if self.args.is_train else title_str + ''' for inference'''
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale('''log''' )
ax.set_yscale('''log''' )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
__a : str = sorted(set(self.result_dict[model_name]['''bsz'''] ) )
__a : Dict = sorted(set(self.result_dict[model_name]['''seq_len'''] ) )
__a : Dict = self.result_dict[model_name]['''result''']
((__a) , (__a)) : List[Any] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
__a : Any = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
__a : Optional[int] = np.asarray(
[results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=_UpperCAmelCase , )
else:
__a : Dict = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((__a) , (__a)) : Union[str, Any] = (
('''batch_size''', '''len''') if self.args.plot_along_batch else ('''in #tokens''', '''bsz''')
)
__a : Any = np.asarray(_UpperCAmelCase , _UpperCAmelCase )[: len(_UpperCAmelCase )]
plt.scatter(
_UpperCAmelCase , _UpperCAmelCase , label=f"""{label_model_name} - {inner_loop_label}: {inner_loop_value}""" )
plt.plot(_UpperCAmelCase , _UpperCAmelCase , '''--''' )
title_str += f""" {label_model_name} vs."""
__a : Optional[Any] = title_str[:-4]
__a : Optional[int] = '''Time in s''' if self.args.is_time else '''Memory in MB'''
# plot
plt.title(_UpperCAmelCase )
plt.xlabel(_UpperCAmelCase )
plt.ylabel(_UpperCAmelCase )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def __A ( ) -> List[str]:
__a : List[str] = HfArgumentParser(a_)
__a : Optional[int] = parser.parse_args_into_dataclasses()[0]
__a : Tuple = Plot(args=a_)
plot.plot()
if __name__ == "__main__":
main()
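# Hedged usage sketch (added for illustration). Assuming the dataclass fields
# above are named `csv_file` and `figure_png_file`, as in the upstream
# transformers benchmark plotting script, the tool is driven from the CLI:
#
#     python plot_csv_file.py --csv_file results.csv --figure_png_file plot.png
#
# The csv is expected to provide `model`, `batch_size`, `sequence_length` and
# `result` columns, matching the DictReader accesses in Plot.__init__.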
| 188
| 1
|
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SCREAMING_SNAKE_CASE = get_tests_dir("fixtures")
class UpperCAmelCase_ ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> str:
'''simple docstring'''
A__ = mock.Mock()
A__ = 500
A__ = {}
A__ = HTTPError
A__ = {}
# Download this model to make sure it's in the cache.
A__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("requests.Session.request" , return_value=snake_case_ ) as mock_head:
A__ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit" )
# This checks that we did call the fake head request
mock_head.assert_called()
def __magic_name__ ( self : Tuple ) -> str:
'''simple docstring'''
A__ = ViTImageProcessor.from_pretrained(
"https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json" )
def __magic_name__ ( self : Any ) -> Any:
'''simple docstring'''
with self.assertRaises(snake_case_ ):
# config is in subfolder, the following should not work without specifying the subfolder
A__ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants" )
A__ = AutoImageProcessor.from_pretrained(
"hf-internal-testing/stable-diffusion-all-variants" , subfolder="feature_extractor" )
self.assertIsNotNone(snake_case_ )
@is_staging_test
class UpperCAmelCase_ ( unittest.TestCase ):
@classmethod
def __magic_name__ ( cls : str ) -> Optional[Any]:
'''simple docstring'''
A__ = TOKEN
HfFolder.save_token(snake_case_ )
@classmethod
def __magic_name__ ( cls : Optional[int] ) -> List[str]:
'''simple docstring'''
try:
delete_repo(token=cls._token , repo_id="test-image-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-image-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-image-processor" )
except HTTPError:
pass
def __magic_name__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ = ViTImageProcessor.from_pretrained(snake_case_ )
image_processor.push_to_hub("test-image-processor" , use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
snake_case_ , repo_id="test-image-processor" , push_to_hub=snake_case_ , use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained(F"""{USER}/test-image-processor""" )
for k, v in image_processor.__dict__.items():
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def __magic_name__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
A__ = ViTImageProcessor.from_pretrained(snake_case_ )
image_processor.push_to_hub("valid_org/test-image-processor" , use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor" )
for k, v in image_processor.__dict__.items():
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
# Reset repo
delete_repo(token=self._token , repo_id="valid_org/test-image-processor" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(
snake_case_ , repo_id="valid_org/test-image-processor-org" , push_to_hub=snake_case_ , use_auth_token=self._token )
A__ = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org" )
for k, v in image_processor.__dict__.items():
self.assertEqual(snake_case_ , getattr(snake_case_ , snake_case_ ) )
def __magic_name__ ( self : Dict ) -> Union[str, Any]:
'''simple docstring'''
CustomImageProcessor.register_for_auto_class()
A__ = CustomImageProcessor.from_pretrained(snake_case_ )
image_processor.push_to_hub("test-dynamic-image-processor" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
image_processor.auto_map , {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"} , )
A__ = AutoImageProcessor.from_pretrained(
F"""{USER}/test-dynamic-image-processor""" , trust_remote_code=snake_case_ )
# Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
self.assertEqual(new_image_processor.__class__.__name__ , "CustomImageProcessor" )
| 247
|
"""simple docstring"""
import qiskit
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> qiskit.result.counts.Counts:
A__ = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
A__ = qiskit.QuantumCircuit(lowercase_ , lowercase_ )
# Apply X (NOT) Gate to Qubits 0 & 1
circuit.x(0 )
circuit.x(1 )
# Map the quantum measurement to the classical bits
circuit.measure([0, 1] , [0, 1] )
# Execute the circuit on the qasm simulator
A__ = qiskit.execute(lowercase_ , lowercase_ , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(lowercase_ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = single_qubit_measure(2, 2)
print(f'Total count for various states are: {counts}')
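# Despite its name, single_qubit_measure builds a two-qubit circuit: both
# qubits are flipped by the X gates before measurement, so every one of the
# 1000 shots lands in state |11> and the printed histogram is expected to be:
#
#     Total count for various states are: {'11': 1000}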
| 247
| 1
|
'''simple docstring'''
def a ( __a = 100 ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ :Optional[int] = set()
UpperCamelCase__ :Dict = 0
UpperCamelCase__ :List[str] = n + 1 # maximum limit
for a in range(2 , lowerCAmelCase__ ):
for b in range(2 , lowerCAmelCase__ ):
UpperCamelCase__ :Tuple = a**b # calculates the current power
collect_powers.add(lowerCAmelCase__ ) # adds the result to the set
return len(lowerCAmelCase__ )
if __name__ == "__main__":
print('''Number of terms ''', solution(int(str(input()).strip())))
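# For reference: with the default limit of 100 this counts the distinct values
# of a**b for 2 <= a, b <= 100, which is 9183 (Project Euler problem 29).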
| 366
|
'''simple docstring'''
from __future__ import annotations
import math
def a ( __a ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( __a ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = str(__a )
UpperCamelCase__ :Dict = [n]
for i in range(1 , len(__a ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def a ( __a ) -> bool:
'''simple docstring'''
if len(str(__a ) ) > 3:
if not is_prime(int(str(__a )[-3:] ) ) or not is_prime(int(str(__a )[:3] ) ):
return False
return True
def a ( __a = 11 ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ :list[int] = []
UpperCamelCase__ :int = 13
while len(__a ) != count:
if validate(__a ):
UpperCamelCase__ :Optional[int] = list_truncated_nums(__a )
if all(is_prime(__a ) for i in list_nums ):
list_truncated_primes.append(__a )
num += 2
return list_truncated_primes
def a ( ) -> int:
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""")
| 219
| 0
|
'''simple docstring'''
from typing import Optional, Union
import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig
UpperCamelCase_ = logging.get_logger(__name__)
# General docstring
UpperCamelCase_ = "MobileNetV1Config"
# Base docstring
UpperCamelCase_ = "google/mobilenet_v1_1.0_224"
UpperCamelCase_ = [1, 1_0_2_4, 7, 7]
# Image classification docstring
UpperCamelCase_ = "google/mobilenet_v1_1.0_224"
UpperCamelCase_ = "tabby, tabby cat"
UpperCamelCase_ = [
"google/mobilenet_v1_1.0_224",
"google/mobilenet_v1_0.75_192",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]
def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: Union[str, Any] ,__UpperCamelCase: List[str]=None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = {}
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Dict = model.mobilenet_va
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = model
SCREAMING_SNAKE_CASE : List[str] = 'MobilenetV1/Conv2d_0/'
SCREAMING_SNAKE_CASE : Optional[Any] = backbone.conv_stem.convolution.weight
SCREAMING_SNAKE_CASE : str = backbone.conv_stem.normalization.bias
SCREAMING_SNAKE_CASE : List[Any] = backbone.conv_stem.normalization.weight
SCREAMING_SNAKE_CASE : Optional[Any] = backbone.conv_stem.normalization.running_mean
SCREAMING_SNAKE_CASE : Dict = backbone.conv_stem.normalization.running_var
for i in range(13 ):
SCREAMING_SNAKE_CASE : Optional[int] = i + 1
SCREAMING_SNAKE_CASE : Union[str, Any] = i * 2
SCREAMING_SNAKE_CASE : List[Any] = backbone.layer[pt_index]
SCREAMING_SNAKE_CASE : Tuple = f"MobilenetV1/Conv2d_{tf_index}_depthwise/"
SCREAMING_SNAKE_CASE : Any = pointer.convolution.weight
SCREAMING_SNAKE_CASE : str = pointer.normalization.bias
SCREAMING_SNAKE_CASE : str = pointer.normalization.weight
SCREAMING_SNAKE_CASE : int = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE : List[str] = pointer.normalization.running_var
SCREAMING_SNAKE_CASE : Dict = backbone.layer[pt_index + 1]
SCREAMING_SNAKE_CASE : List[Any] = f"MobilenetV1/Conv2d_{tf_index}_pointwise/"
SCREAMING_SNAKE_CASE : Optional[int] = pointer.convolution.weight
SCREAMING_SNAKE_CASE : str = pointer.normalization.bias
SCREAMING_SNAKE_CASE : str = pointer.normalization.weight
SCREAMING_SNAKE_CASE : Optional[Any] = pointer.normalization.running_mean
SCREAMING_SNAKE_CASE : Optional[int] = pointer.normalization.running_var
if isinstance(__UpperCamelCase ,__UpperCamelCase ):
SCREAMING_SNAKE_CASE : Any = 'MobilenetV1/Logits/Conv2d_1c_1x1/'
SCREAMING_SNAKE_CASE : Dict = model.classifier.weight
SCREAMING_SNAKE_CASE : int = model.classifier.bias
return tf_to_pt_map
def lowercase__( __UpperCamelCase: Any ,__UpperCamelCase: Optional[int] ,__UpperCamelCase: Optional[int] ):
"""simple docstring"""
try:
import numpy as np
import tensorflow as tf
except ImportError:
logger.error(
'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see '
'https://www.tensorflow.org/install/ for installation instructions.' )
raise
# Load weights from TF model
SCREAMING_SNAKE_CASE : List[Any] = tf.train.list_variables(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Optional[Any] = {}
for name, shape in init_vars:
logger.info(f"Loading TF weight {name} with shape {shape}" )
SCREAMING_SNAKE_CASE : Optional[Any] = tf.train.load_variable(__UpperCamelCase ,__UpperCamelCase )
SCREAMING_SNAKE_CASE : str = array
# Build TF to PyTorch weights loading map
SCREAMING_SNAKE_CASE : List[str] = _build_tf_to_pytorch_map(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
for name, pointer in tf_to_pt_map.items():
logger.info(f"Importing {name}" )
if name not in tf_weights:
logger.info(f"{name} not in tf pre-trained weights, skipping" )
continue
SCREAMING_SNAKE_CASE : List[Any] = tf_weights[name]
if "depthwise_weights" in name:
logger.info('Transposing depthwise' )
SCREAMING_SNAKE_CASE : int = np.transpose(__UpperCamelCase ,(2, 3, 0, 1) )
elif "weights" in name:
logger.info('Transposing' )
if len(pointer.shape ) == 2: # copying into linear layer
SCREAMING_SNAKE_CASE : List[Any] = array.squeeze().transpose()
else:
SCREAMING_SNAKE_CASE : Any = np.transpose(__UpperCamelCase ,(3, 2, 0, 1) )
if pointer.shape != array.shape:
raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" )
logger.info(f"Initialize PyTorch weight {name} {array.shape}" )
SCREAMING_SNAKE_CASE : str = torch.from_numpy(__UpperCamelCase )
tf_weights.pop(__UpperCamelCase ,__UpperCamelCase )
tf_weights.pop(name + '/RMSProp' ,__UpperCamelCase )
tf_weights.pop(name + '/RMSProp_1' ,__UpperCamelCase )
tf_weights.pop(name + '/ExponentialMovingAverage' ,__UpperCamelCase )
logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" )
return model
def lowercase__( __UpperCamelCase: torch.Tensor ,__UpperCamelCase: nn.Convad ):
"""simple docstring"""
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = features.shape[-2:]
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : List[str] = conv_layer.stride
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = conv_layer.kernel_size
if in_height % stride_height == 0:
SCREAMING_SNAKE_CASE : int = max(kernel_height - stride_height ,0 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = max(kernel_height - (in_height % stride_height) ,0 )
if in_width % stride_width == 0:
SCREAMING_SNAKE_CASE : Optional[int] = max(kernel_width - stride_width ,0 )
else:
SCREAMING_SNAKE_CASE : int = max(kernel_width - (in_width % stride_width) ,0 )
SCREAMING_SNAKE_CASE : Optional[int] = pad_along_width // 2
SCREAMING_SNAKE_CASE : Optional[Any] = pad_along_width - pad_left
SCREAMING_SNAKE_CASE : Union[str, Any] = pad_along_height // 2
SCREAMING_SNAKE_CASE : Any = pad_along_height - pad_top
SCREAMING_SNAKE_CASE : Optional[Any] = (pad_left, pad_right, pad_top, pad_bottom)
return nn.functional.pad(__UpperCamelCase ,__UpperCamelCase ,'constant' ,0.0 )
class _a ( nn.Module ):
'''simple docstring'''
def __init__( self, A, A, A, A, A = 1, A = 1, A = False, A = True, A = True, ):
'''simple docstring'''
super().__init__()
SCREAMING_SNAKE_CASE : Optional[Any] = config
if in_channels % groups != 0:
raise ValueError(F"Input channels ({in_channels}) are not divisible by {groups} groups." )
if out_channels % groups != 0:
raise ValueError(F"Output channels ({out_channels}) are not divisible by {groups} groups." )
SCREAMING_SNAKE_CASE : Optional[Any] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )
SCREAMING_SNAKE_CASE : Optional[Any] = nn.Convad(
in_channels=A, out_channels=A, kernel_size=A, stride=A, padding=A, groups=A, bias=A, padding_mode='zeros', )
if use_normalization:
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.BatchNormad(
num_features=A, eps=config.layer_norm_eps, momentum=0.99_97, affine=A, track_running_stats=A, )
else:
SCREAMING_SNAKE_CASE : Dict = None
if use_activation:
if isinstance(A, A ):
SCREAMING_SNAKE_CASE : Tuple = ACTaFN[use_activation]
elif isinstance(config.hidden_act, A ):
SCREAMING_SNAKE_CASE : List[str] = ACTaFN[config.hidden_act]
else:
SCREAMING_SNAKE_CASE : Tuple = config.hidden_act
else:
SCREAMING_SNAKE_CASE : List[str] = None
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if self.config.tf_padding:
SCREAMING_SNAKE_CASE : Optional[Any] = apply_tf_padding(A, self.convolution )
SCREAMING_SNAKE_CASE : str = self.convolution(A )
if self.normalization is not None:
SCREAMING_SNAKE_CASE : Dict = self.normalization(A )
if self.activation is not None:
SCREAMING_SNAKE_CASE : int = self.activation(A )
return features
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
A : str = MobileNetVaConfig
A : Tuple = load_tf_weights_in_mobilenet_va
A : Any = '''mobilenet_v1'''
A : List[Any] = '''pixel_values'''
A : Dict = False
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
if isinstance(A, (nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(A, nn.BatchNormad ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
UpperCamelCase_ = R"\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
UpperCamelCase_ = R"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`MobileNetV1ImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'''The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.''' , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A, A = True ):
'''simple docstring'''
super().__init__(A )
SCREAMING_SNAKE_CASE : int = config
SCREAMING_SNAKE_CASE : List[str] = 32
SCREAMING_SNAKE_CASE : Optional[int] = max(int(depth * config.depth_multiplier ), config.min_depth )
SCREAMING_SNAKE_CASE : Union[str, Any] = MobileNetVaConvLayer(
A, in_channels=config.num_channels, out_channels=A, kernel_size=3, stride=2, )
SCREAMING_SNAKE_CASE : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]
SCREAMING_SNAKE_CASE : List[Any] = nn.ModuleList()
for i in range(13 ):
SCREAMING_SNAKE_CASE : Optional[Any] = out_channels
if strides[i] == 2 or i == 0:
depth *= 2
SCREAMING_SNAKE_CASE : Optional[int] = max(int(depth * config.depth_multiplier ), config.min_depth )
self.layer.append(
MobileNetVaConvLayer(
A, in_channels=A, out_channels=A, kernel_size=3, stride=strides[i], groups=A, ) )
self.layer.append(
MobileNetVaConvLayer(
A, in_channels=A, out_channels=A, kernel_size=1, ) )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def UpperCamelCase_ ( self, A ):
'''simple docstring'''
raise NotImplementedError
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC, output_type=A, config_class=_CONFIG_FOR_DOC, modality='vision', expected_output=_EXPECTED_OUTPUT_SHAPE, )
def UpperCamelCase_ ( self, A = None, A = None, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE : Any = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError('You have to specify pixel_values' )
SCREAMING_SNAKE_CASE : int = self.conv_stem(A )
SCREAMING_SNAKE_CASE : Dict = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer ):
SCREAMING_SNAKE_CASE : str = layer_module(A )
if output_hidden_states:
SCREAMING_SNAKE_CASE : Tuple = all_hidden_states + (hidden_states,)
SCREAMING_SNAKE_CASE : Dict = hidden_states
if self.pooler is not None:
SCREAMING_SNAKE_CASE : Optional[int] = torch.flatten(self.pooler(A ), start_dim=1 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=A, pooler_output=A, hidden_states=A, )
@add_start_docstrings(
'''
MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
''' , SCREAMING_SNAKE_CASE , )
class _a ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self, A ):
'''simple docstring'''
super().__init__(A )
SCREAMING_SNAKE_CASE : Optional[int] = config.num_labels
SCREAMING_SNAKE_CASE : Any = MobileNetVaModel(A )
SCREAMING_SNAKE_CASE : Tuple = self.mobilenet_va.layer[-1].convolution.out_channels
# Classifier head
SCREAMING_SNAKE_CASE : Tuple = nn.Dropout(config.classifier_dropout_prob, inplace=A )
SCREAMING_SNAKE_CASE : Tuple = nn.Linear(A, config.num_labels ) if config.num_labels > 0 else nn.Identity()
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(A )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=A, config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
def UpperCamelCase_ ( self, A = None, A = None, A = None, A = None, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE : Tuple = self.mobilenet_va(A, output_hidden_states=A, return_dict=A )
SCREAMING_SNAKE_CASE : Any = outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE : Optional[int] = self.classifier(self.dropout(A ) )
SCREAMING_SNAKE_CASE : List[str] = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = 'regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE : str = 'single_label_classification'
else:
SCREAMING_SNAKE_CASE : str = 'multi_label_classification'
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE : str = MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(logits.squeeze(), labels.squeeze() )
else:
SCREAMING_SNAKE_CASE : List[str] = loss_fct(A, A )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE : Tuple = CrossEntropyLoss()
SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(logits.view(-1, self.num_labels ), labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE : str = BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE : Union[str, Any] = loss_fct(A, A )
if not return_dict:
SCREAMING_SNAKE_CASE : Any = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(
loss=A, logits=A, hidden_states=outputs.hidden_states, )
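# Hedged usage sketch (added for illustration). Assumption: the classes above
# are exported under their upstream names, and the checkpoint id is taken from
# the docstring constants in this file.
from PIL import Image
from transformers import AutoImageProcessor, MobileNetV1ForImageClassification

processor = AutoImageProcessor.from_pretrained("google/mobilenet_v1_1.0_224")
model = MobileNetV1ForImageClassification.from_pretrained("google/mobilenet_v1_1.0_224")

image = Image.open("cat.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])  # e.g. "tabby, tabby cat"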
| 251
|
'''simple docstring'''
import os
import pytest
import yaml
from datasets.features.features import Features, Value
from datasets.info import DatasetInfo, DatasetInfosDict
@pytest.mark.parametrize(
'files' ,[
['full:README.md', 'dataset_infos.json'],
['empty:README.md', 'dataset_infos.json'],
['dataset_infos.json'],
['full:README.md'],
] ,)
def lowercase__( __UpperCamelCase: int ,__UpperCamelCase: Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = tmp_path_factory.mktemp('dset_infos_dir' )
if "full:README.md" in files:
with open(dataset_infos_dir / 'README.md' ,'w' ) as f:
f.write('---\ndataset_info:\n dataset_size: 42\n---' )
if "empty:README.md" in files:
with open(dataset_infos_dir / 'README.md' ,'w' ) as f:
f.write('' )
# we want to support dataset_infos.json for backward compatibility
if "dataset_infos.json" in files:
with open(dataset_infos_dir / 'dataset_infos.json' ,'w' ) as f:
f.write('{"default": {"dataset_size": 42}}' )
SCREAMING_SNAKE_CASE : int = DatasetInfosDict.from_directory(__UpperCamelCase )
assert dataset_infos
assert dataset_infos["default"].dataset_size == 42
@pytest.mark.parametrize(
'dataset_info' ,[
DatasetInfo(),
DatasetInfo(
description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,),
] ,)
def lowercase__( __UpperCamelCase: List[Any] ,__UpperCamelCase: DatasetInfo ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = str(__UpperCamelCase )
dataset_info.write_to_directory(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = DatasetInfo.from_directory(__UpperCamelCase )
assert dataset_info == reloaded
assert os.path.exists(os.path.join(__UpperCamelCase ,'dataset_info.json' ) )
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = DatasetInfo(
description='foo' ,citation='bar' ,homepage='https://foo.bar' ,license='CC0' ,features=Features({'a': Value('int32' )} ) ,post_processed={} ,supervised_keys=() ,task_templates=[] ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train', 'num_examples': 42}] ,download_checksums={} ,download_size=13_37 ,post_processing_size=4_42 ,dataset_size=12_34 ,size_in_bytes=13_37 + 4_42 + 12_34 ,)
SCREAMING_SNAKE_CASE : List[Any] = dataset_info._to_yaml_dict()
assert sorted(__UpperCamelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML )
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
assert key in dataset_info_yaml_dict
assert isinstance(dataset_info_yaml_dict[key] ,(list, dict, int, str) )
SCREAMING_SNAKE_CASE : Dict = yaml.safe_dump(__UpperCamelCase )
SCREAMING_SNAKE_CASE : Tuple = yaml.safe_load(__UpperCamelCase )
assert dataset_info_yaml_dict == reloaded
def lowercase__( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = DatasetInfo()
SCREAMING_SNAKE_CASE : Optional[int] = dataset_info._to_yaml_dict()
assert dataset_info_yaml_dict == {}
@pytest.mark.parametrize(
'dataset_infos_dict' ,[
DatasetInfosDict(),
DatasetInfosDict({'default': DatasetInfo()} ),
DatasetInfosDict({'my_config_name': DatasetInfo()} ),
DatasetInfosDict(
{
'default': DatasetInfo(
description='foo' ,features=Features({'a': Value('int32' )} ) ,builder_name='builder' ,config_name='config' ,version='1.0.0' ,splits=[{'name': 'train'}] ,download_size=42 ,)
} ),
DatasetInfosDict(
{
'v1': DatasetInfo(dataset_size=42 ),
'v2': DatasetInfo(dataset_size=13_37 ),
} ),
] ,)
def lowercase__( __UpperCamelCase: Optional[Any] ,__UpperCamelCase: DatasetInfosDict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : int = str(__UpperCamelCase )
dataset_infos_dict.write_to_directory(__UpperCamelCase )
SCREAMING_SNAKE_CASE : List[str] = DatasetInfosDict.from_directory(__UpperCamelCase )
# the config_name of the dataset_infos_dict takes precedence over the attribute
for config_name, dataset_info in dataset_infos_dict.items():
SCREAMING_SNAKE_CASE : str = config_name
# the yaml representation doesn't include fields like description or citation
# so we just test that we can recover what we can from the yaml
SCREAMING_SNAKE_CASE : Optional[int] = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() )
assert dataset_infos_dict == reloaded
if dataset_infos_dict:
assert os.path.exists(os.path.join(__UpperCamelCase ,'README.md' ) )
| 251
| 1
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class _UpperCAmelCase ( snake_case_ ):
snake_case = '''Salesforce/blip-image-captioning-base'''
snake_case = (
'''This is a tool that generates a description of an image. It takes an input named `image` which should be the '''
'''image to caption, and returns a text that contains the description in English.'''
)
snake_case = '''image_captioner'''
snake_case = AutoModelForVisionaSeq
snake_case = ['''image''']
snake_case = ['''text''']
def __init__( self : Union[str, Any] , *__UpperCAmelCase : str , **__UpperCAmelCase : Dict ):
'''simple docstring'''
requires_backends(self , ["vision"] )
super().__init__(*__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : int , __UpperCAmelCase : "Image" ):
'''simple docstring'''
return self.pre_processor(images=__UpperCAmelCase , return_tensors="pt" )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : Tuple ):
'''simple docstring'''
return self.model.generate(**__UpperCAmelCase )
def lowerCAmelCase ( self : Tuple , __UpperCAmelCase : str ):
'''simple docstring'''
return self.pre_processor.batch_decode(__UpperCAmelCase , skip_special_tokens=__UpperCAmelCase )[0].strip()
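# Hedged usage sketch (added for illustration). Assumption: the tool above is
# exported as `ImageCaptioningTool`, its upstream transformers name; the base
# PipelineTool.__call__ chains encode -> forward -> decode for us.
from PIL import Image
from transformers.tools import ImageCaptioningTool

tool = ImageCaptioningTool()
caption = tool(Image.open("photo.png"))
print(caption)  # a short English description of the image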
| 356
|
'''simple docstring'''
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : Dict , __UpperCAmelCase : Union[str, Any]="" , __UpperCAmelCase : List[str]="train" ):
'''simple docstring'''
assert os.path.isdir(__UpperCAmelCase )
_A = []
_A = os.listdir(__UpperCAmelCase )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
_A = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if not os.path.isfile(__UpperCAmelCase ):
continue
self.documents.append(__UpperCAmelCase )
def __len__( self : str ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self : Union[str, Any] , __UpperCAmelCase : str ):
'''simple docstring'''
_A = self.documents[idx]
_A = document_path.split("/" )[-1]
with open(__UpperCAmelCase , encoding="utf-8" ) as source:
_A = source.read()
_A , _A = process_story(__UpperCAmelCase )
return document_name, story_lines, summary_lines
def __lowercase ( __lowercase ) -> Optional[Any]:
'''simple docstring'''
_A = list(filter(lambda __lowercase : len(__lowercase ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) )
# for some unknown reason some lines are missing a period; add it
_A = [_add_missing_period(__lowercase ) for line in nonempty_lines]
# gather article lines
_A = []
_A = deque(__lowercase )
while True:
try:
_A = lines.popleft()
if element.startswith("@highlight" ):
break
story_lines.append(__lowercase )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until the deque is empty, which raises an IndexError.
return story_lines, []
# gather summary lines
_A = list(filter(lambda __lowercase : not t.startswith("@highlight" ) , __lowercase ) )
return story_lines, summary_lines
def __lowercase ( __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"]
if line.startswith("@highlight" ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> str:
'''simple docstring'''
if len(__lowercase ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(__lowercase )) )
return sequence
def __lowercase ( __lowercase , __lowercase ) -> Optional[int]:
'''simple docstring'''
_A = torch.ones_like(__lowercase )
_A = sequence == pad_token_id
_A = 0
return mask
def __lowercase ( __lowercase , __lowercase , __lowercase ) -> str:
'''simple docstring'''
_A = [tokenizer.encode(__lowercase ) for line in story_lines]
_A = [token for sentence in story_lines_token_ids for token in sentence]
_A = [tokenizer.encode(__lowercase ) for line in summary_lines]
_A = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def __lowercase ( __lowercase , __lowercase ) -> List[str]:
'''simple docstring'''
_A = []
for sequence in batch:
_A = -1
_A = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(__lowercase )
return torch.tensor(__lowercase )
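# Hedged illustration of the padding/masking helpers above (assumption: their
# upstream names are fit_to_block_size and build_mask, from the CNN/DailyMail
# summarization utilities; the values below are made up):
#
#     fit_to_block_size([5, 6, 7], block_size=5, pad_token_id=0)  # -> [5, 6, 7, 0, 0]
#
# build_mask on that padded sequence returns a tensor of ones with zeros at
# the two padded positions.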
| 174
| 0
|
"""simple docstring"""
def _lowerCAmelCase ( UpperCamelCase_ = 100 ):
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
for i in range(1 , n + 1 ):
sum_of_squares += i**2
sum_of_ints += i
return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 100
|
'''simple docstring'''
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.esm.modeling_esmfold import EsmForProteinFolding
class a__:
def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ):
a : Tuple = parent
a : List[str] = batch_size
a : Optional[Any] = seq_length
a : Tuple = is_training
a : Optional[Any] = use_input_mask
a : List[Any] = use_token_type_ids
a : List[Any] = use_labels
a : int = vocab_size
a : Union[str, Any] = hidden_size
a : Any = num_hidden_layers
a : List[str] = num_attention_heads
a : int = intermediate_size
a : str = hidden_act
a : Tuple = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : List[str] = max_position_embeddings
a : Any = type_vocab_size
a : List[str] = type_sequence_label_size
a : Union[str, Any] = initializer_range
a : Optional[int] = num_labels
a : Optional[Any] = num_choices
a : Optional[int] = scope
def lowercase_ ( self : List[Any] ):
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a : Dict = None
if self.use_input_mask:
a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
a : Optional[Any] = None
a : Optional[int] = None
a : Dict = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
a : List[str] = ids_tensor([self.batch_size] , self.num_choices )
a : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase_ ( self : List[Any] ):
a : Any = EsmConfig(
vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , )
return config
def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ):
a : Tuple = EsmForProteinFolding(config=__snake_case ).float()
model.to(__snake_case )
model.eval()
a : Dict = model(__snake_case , attention_mask=__snake_case )
a : Union[str, Any] = model(__snake_case )
a : List[Any] = model(__snake_case )
self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) )
self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) )
def lowercase_ ( self : Optional[Any] ):
a : Tuple = self.prepare_config_and_inputs()
((a) , (a) , (a) , (a) , (a) , (a)) : Optional[Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ):
lowercase__ = False
lowercase__ = (EsmForProteinFolding,) if is_torch_available() else ()
lowercase__ = ()
lowercase__ = {} if is_torch_available() else {}
lowercase__ = False
def lowercase_ ( self : int ):
a : Tuple = EsmFoldModelTester(self )
a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def lowercase_ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase_ ( self : Union[str, Any] ):
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
@unittest.skip('Does not support attention outputs' )
def lowercase_ ( self : str ):
pass
@unittest.skip
def lowercase_ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : Optional[int] ):
pass
@unittest.skip('Esm does not support embedding resizing' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold does not support passing input embeds!' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMFold does not support head pruning.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold does not output hidden states in the normal way.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMfold does not output hidden states in the normal way.' )
def lowercase_ ( self : int ):
pass
@unittest.skip('ESMFold only has one output format.' )
def lowercase_ ( self : Dict ):
pass
@unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' )
def lowercase_ ( self : Tuple ):
pass
@unittest.skip('ESMFold does not support input chunking.' )
def lowercase_ ( self : List[str] ):
pass
@unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' )
def lowercase_ ( self : List[Any] ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : Any ):
pass
@unittest.skip('ESMFold doesn\'t support torchscript compilation.' )
def lowercase_ ( self : List[str] ):
pass
@unittest.skip('ESMFold doesn\'t support data parallel.' )
def lowercase_ ( self : Dict ):
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowercase_ ( self : Union[str, Any] ):
pass
@require_torch
class a__( lowerCamelCase__ ):
@slow
def lowercase_ ( self : Optional[int] ):
a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float()
model.eval()
a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
a : Any = model(__snake_case )['positions']
a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa )
self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
| 297
| 0
|
"""Convert SwiftFormer checkpoints from the original implementation."""

import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    """Download the COCO test image of two cats, used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    """Return the first five logits the original model produces for the test image."""
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    """Move the value stored under key `old` to key `new`."""
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    """Map original checkpoint keys to their names in the HuggingFace model."""
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys
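
# For illustration: under the mapping above, an original key such as
# "network.0.0.dwconv.weight" (a hypothetical example, not taken from a real
# checkpoint) is first rewritten to ".depth_wise_conv" and then, because the
# third dot-separated component is a digit, becomes
# "swiftformer.encoder.network.0.blocks.0.depth_wise_conv.weight".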
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    """Copy/paste/tweak the original model's weights into our SwiftFormer structure."""
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
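
# Example invocation (a sketch; the script file name and the checkpoint path
# are assumptions, not taken from this file):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth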
"""Convert Table Transformer checkpoints.

URL: https://github.com/microsoft/table-transformer
"""

import argparse
from collections import OrderedDict
from pathlib import Path

import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F

from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
    # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.weight", f"encoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.self_attn.out_proj.bias", f"encoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.weight", f"encoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear1.bias", f"encoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.weight", f"encoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.linear2.bias", f"encoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.encoder.layers.{i}.norm1.weight", f"encoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.encoder.layers.{i}.norm1.bias", f"encoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.weight", f"encoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.encoder.layers.{i}.norm2.bias", f"encoder.layers.{i}.final_layer_norm.bias"))
    # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.weight", f"decoder.layers.{i}.self_attn.out_proj.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.self_attn.out_proj.bias", f"decoder.layers.{i}.self_attn.out_proj.bias")
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.weight",
            f"decoder.layers.{i}.encoder_attn.out_proj.weight",
        )
    )
    rename_keys.append(
        (
            f"transformer.decoder.layers.{i}.multihead_attn.out_proj.bias",
            f"decoder.layers.{i}.encoder_attn.out_proj.bias",
        )
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.weight", f"decoder.layers.{i}.fc1.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear1.bias", f"decoder.layers.{i}.fc1.bias"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.weight", f"decoder.layers.{i}.fc2.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.linear2.bias", f"decoder.layers.{i}.fc2.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm1.weight", f"decoder.layers.{i}.self_attn_layer_norm.weight")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm1.bias", f"decoder.layers.{i}.self_attn_layer_norm.bias"))
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.weight", f"decoder.layers.{i}.encoder_attn_layer_norm.weight")
    )
    rename_keys.append(
        (f"transformer.decoder.layers.{i}.norm2.bias", f"decoder.layers.{i}.encoder_attn_layer_norm.bias")
    )
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.weight", f"decoder.layers.{i}.final_layer_norm.weight"))
    rename_keys.append((f"transformer.decoder.layers.{i}.norm3.bias", f"decoder.layers.{i}.final_layer_norm.bias"))

# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
    [
        ("input_proj.weight", "input_projection.weight"),
        ("input_proj.bias", "input_projection.bias"),
        ("query_embed.weight", "query_position_embeddings.weight"),
        ("transformer.encoder.norm.weight", "encoder.layernorm.weight"),
        ("transformer.encoder.norm.bias", "encoder.layernorm.bias"),
        ("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
        ("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
        ("class_embed.weight", "class_labels_classifier.weight"),
        ("class_embed.bias", "class_labels_classifier.bias"),
        ("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
        ("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
        ("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
        ("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
        ("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
        ("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
    ]
)
def rename_key(state_dict, old, new):
    """Move the value stored under key `old` to key `new`."""
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    """Rename the ResNet backbone keys to their HuggingFace equivalents."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value

    return new_state_dict


def read_in_q_k_v(state_dict):
    """Split the fused in_proj weights into separate query, key and value projections."""
    prefix = ""

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
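
# A minimal sketch of the split performed above: PyTorch's nn.MultiheadAttention
# stores the query, key and value projections stacked in a single
# (3 * hidden_size, hidden_size) matrix. With hidden_size = 256, rows 0-255 are
# the query projection, rows 256-511 the key projection, and the last 256 rows
# the value projection. The toy tensor below is for illustration only:
#   in_proj = torch.randn(3 * 256, 256)
#   q_w, k_w, v_w = in_proj[:256, :], in_proj[256:512, :], in_proj[-256:, :]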
def resize(image, checkpoint_url):
    """Resize the image so its longer side matches the size the model expects."""
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))

    return resized_image


def normalize(image):
    """Convert the image to a tensor and normalize it with ImageNet statistics."""
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image


@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original model's weights to our Table Transformer structure."""
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_url",
        default="https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
        type=str,
        choices=[
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth",
            "https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth",
        ],
        help="URL of the Table Transformer checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )
    args = parser.parse_args()
    convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
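
# Example invocation (a sketch; the script file name and the output folder are
# assumptions, not taken from this file):
#   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
#       --pytorch_dump_folder_path ./table-transformer-detection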